Mirror of https://github.com/catlog22/Claude-Code-Workflow.git (synced 2026-02-06 01:54:11 +08:00)

Compare commits: 1042 commits
.claude/CLAUDE.md (new file, 44 lines)

@@ -0,0 +1,44 @@

# Claude Instructions

- **Coding Philosophy**: @~/.claude/workflows/coding-philosophy.md

## CLI Endpoints

- **CLI Tools Usage**: @~/.claude/workflows/cli-tools-usage.md
- **CLI Endpoints Config**: @~/.claude/cli-tools.json

**Strictly follow the cli-tools.json configuration.**

Available CLI endpoints are dynamically defined by the config file.

## Tool Execution

- **Context Requirements**: @~/.claude/workflows/context-tools.md
- **File Modification**: @~/.claude/workflows/file-modification.md

### Agent Calls

- **Always use `run_in_background: false`** for Task tool agent calls: `Task({ subagent_type: "xxx", prompt: "...", run_in_background: false })` to ensure synchronous execution and immediate result visibility
- **TaskOutput usage**: Only use `TaskOutput({ task_id: "xxx", block: false })` plus a sleep loop to poll completion status (see the sketch below). NEVER read intermediate output during agent/CLI execution - wait for the final result only
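A minimal sketch of that polling pattern, assuming a `status`/`output` shape on the TaskOutput result and a 5-second interval (neither is prescribed by this file):

```javascript
// Poll a background task without reading intermediate output.
let result = TaskOutput({ task_id: "xxx", block: false })
while (result.status === "running") {   // assumed status field
  Bash({ command: "sleep 5" })          // assumed 5s interval
  result = TaskOutput({ task_id: "xxx", block: false })
}
// Only the final result is consumed
console.log(result.output)              // assumed output field
```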
### CLI Tool Calls (ccw cli)

- **Default: Use Bash `run_in_background: true`** - Unless otherwise specified, always execute CLI calls in the background using the Bash tool's background mode:

```
Bash({
  command: "ccw cli -p '...' --tool gemini",
  run_in_background: true  // Bash tool parameter, not a ccw cli parameter
})
```

- **After CLI call**: Stop output immediately - let the CLI execute in the background. **DO NOT use TaskOutput polling** - wait for the hook callback to receive results

### CLI Analysis Calls

- **Wait for results**: MUST wait for CLI analysis to complete before taking any write action. Do NOT proceed with fixes while analysis is running
- **Value every call**: Each CLI invocation is valuable and costly. NEVER waste analysis results:
  - Aggregate multiple analysis results before proposing solutions

### CLI Auto-Invoke Triggers

- **Reference**: See `cli-tools-usage.md` → [Auto-Invoke Triggers](#auto-invoke-triggers) for the full specification
- **Key scenarios**: Self-repair fails, ambiguous requirements, architecture decisions, pattern uncertainty, critical code paths
- **Principles**: Default `--mode analysis`, no confirmation needed, wait for completion, flexible rule selection

## Code Diagnostics

- **Prefer `mcp__ide__getDiagnostics`** for code error checking over shell-based TypeScript compilation
.claude/TYPESCRIPT_LSP_SETUP.md (new file, 366 lines)

@@ -0,0 +1,366 @@

# Claude Code TypeScript LSP Setup Guide

> Updated: 2026-01-20
> Applies to: Claude Code v2.0.74+

---

## Contents

1. [Option 1: Plugin Marketplace (Recommended)](#option-1-plugin-marketplace-recommended)
2. [Option 2: MCP Server (cclsp)](#option-2-mcp-server-cclsp)
3. [Option 3: Built-in LSP Tool](#option-3-built-in-lsp-tool)
4. [Verifying the Setup](#verifying-the-setup)
5. [Troubleshooting](#troubleshooting)

---

## Option 1: Plugin Marketplace (Recommended)

### Step 1: Add the plugin marketplace

Run inside Claude Code:

```bash
/plugin marketplace add boostvolt/claude-code-lsps
```

### Step 2: Install the TypeScript LSP plugin

```bash
# TypeScript/JavaScript support (vtsls recommended)
/plugin install vtsls@claude-code-lsps
```

### Step 3: Verify the installation

```bash
/plugin list
```

You should see:

```
✓ vtsls@claude-code-lsps (enabled)
✓ pyright-lsp@claude-plugins-official (enabled)
```

### Automatic config update

After installation, `~/.claude/settings.json` is updated automatically:

```json
{
  "enabledPlugins": {
    "pyright-lsp@claude-plugins-official": true,
    "vtsls@claude-code-lsps": true
  }
}
```

### Supported operations

- `goToDefinition` - jump to a definition (example below)
- `findReferences` - find references
- `hover` - show type information
- `documentSymbol` - list document symbols
- `getDiagnostics` - fetch diagnostics
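As a quick illustration, these operations are invoked through the same `LSP({...})` tool call shown in the verification section below; the file path and position here are placeholders:

```typescript
// A hypothetical goToDefinition call; the operation name matches the list above
LSP({
  operation: "goToDefinition",
  filePath: "src/app.ts",  // placeholder path
  line: 12,
  character: 8
})
```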
---

## Option 2: MCP Server (cclsp)

### Advantages

- **Position tolerance**: automatically corrects the imprecise line numbers AI tends to generate
- **More features**: supports rename and full diagnostics
- **Flexible configuration**: fully customizable LSP servers

### Installation

#### 1. Install the TypeScript Language Server

```bash
npm install -g typescript-language-server typescript
```

Verify the installation:

```bash
typescript-language-server --version
```

#### 2. Configure cclsp

Run the automatic setup:

```bash
npx cclsp@latest setup --user
```

Or create the config file manually:

**Location**: `~/.claude/cclsp.json` or `~/.config/claude/cclsp.json`

```json
{
  "servers": [
    {
      "extensions": ["ts", "tsx", "js", "jsx"],
      "command": ["typescript-language-server", "--stdio"],
      "rootDir": ".",
      "restartInterval": 5,
      "initializationOptions": {
        "preferences": {
          "includeInlayParameterNameHints": "all",
          "includeInlayPropertyDeclarationTypeHints": true,
          "includeInlayFunctionParameterTypeHints": true,
          "includeInlayVariableTypeHints": true
        }
      }
    },
    {
      "extensions": ["py", "pyi"],
      "command": ["pylsp"],
      "rootDir": ".",
      "restartInterval": 5
    }
  ]
}
```

#### 3. Enable the MCP server in Claude Code

Add it to the Claude Code configuration:

```bash
# Check the current MCP config
cat ~/.claude/.mcp.json

# Create it if it does not exist
```

**File**: `~/.claude/.mcp.json`

```json
{
  "mcpServers": {
    "cclsp": {
      "command": "npx",
      "args": ["cclsp@latest"]
    }
  }
}
```

### MCP tools provided by cclsp

Claude Code invokes these tools automatically:

- `find_definition` - find a definition by name (fuzzy matching supported)
- `find_references` - find all references
- `rename_symbol` - rename a symbol (with backup)
- `get_diagnostics` - fetch diagnostics
- `restart_server` - restart the LSP server

---

## Option 3: Built-in LSP Tool

### Enabling it

Set the environment variable:

**Linux/Mac**:

```bash
export ENABLE_LSP_TOOL=1
claude
```

**Windows (PowerShell)**:

```powershell
$env:ENABLE_LSP_TOOL=1
claude
```

**Permanent** (add it to your shell profile):

```bash
# Linux/Mac
echo 'export ENABLE_LSP_TOOL=1' >> ~/.bashrc
source ~/.bashrc

# Windows (PowerShell profile)
Add-Content $PROFILE '$env:ENABLE_LSP_TOOL=1'
```

### Limitations

- A language server plugin must be installed first (see Option 1)
- No support for advanced operations such as rename
- No position tolerance

---

## Verifying the Setup

### 1. Check that the LSP server is available

```bash
# Check the TypeScript Language Server
which typescript-language-server   # Linux/Mac
where typescript-language-server   # Windows

# Test run
typescript-language-server --stdio
```

### 2. Test inside Claude Code

Open any TypeScript file and have Claude run:

```typescript
// Test the LSP tool
LSP({
  operation: "hover",
  filePath: "path/to/your/file.ts",
  line: 10,
  character: 5
})
```

### 3. Check the plugin status

```bash
/plugin list
```

Inspect the enabled plugins:

```bash
cat ~/.claude/settings.json | grep enabledPlugins
```

---

## Troubleshooting

### Issue 1: "No LSP server available"

**Cause**: the TypeScript LSP plugin is not installed or not enabled.

**Fix**:

```bash
# Reinstall the plugin
/plugin install vtsls@claude-code-lsps

# Check settings.json
cat ~/.claude/settings.json
```

### Issue 2: "typescript-language-server: command not found"

**Cause**: the TypeScript Language Server is not installed.

**Fix**:

```bash
npm install -g typescript-language-server typescript

# Verify
typescript-language-server --version
```

### Issue 3: LSP responses are slow or time out

**Cause**: the project is too large, or the configuration is suboptimal.

**Fix**:

```json
// Optimize tsconfig.json
{
  "compilerOptions": {
    "incremental": true,
    "skipLibCheck": true
  },
  "exclude": ["node_modules", "dist"]
}
```

### Issue 4: Plugin installation fails

**Cause**: network problems, or the plugin marketplace has not been added.

**Fix**:

```bash
# Confirm the marketplace has been added
/plugin marketplace list

# If not, add it again
/plugin marketplace add boostvolt/claude-code-lsps

# Retry the installation
/plugin install vtsls@claude-code-lsps
```

---

## Comparing the Three Options

| Feature | Plugin marketplace | cclsp (MCP) | Built-in LSP |
|---------|--------------------|-------------|--------------|
| Setup complexity | ⭐ Low | ⭐⭐ Medium | ⭐ Low |
| Feature coverage | ⭐⭐⭐ Full | ⭐⭐⭐ Full+ | ⭐⭐ Basic |
| Position tolerance | ❌ No | ✅ Yes | ❌ No |
| Rename support | ✅ Yes | ✅ Yes | ❌ No |
| Custom configuration | ⚙️ Limited | ⚙️ Full | ❌ None |
| Production stability | ⭐⭐⭐ High | ⭐⭐ Medium | ⭐⭐⭐ High |

---

## Recommended Setups

### New users

**Recommended**: Option 1 (plugin marketplace)

- One-command installation
- Officially maintained, stable and reliable
- Covers day-to-day needs

### Advanced users

**Recommended**: Option 2 (cclsp)

- Full feature support
- Position tolerance (AI-friendly)
- Flexible configuration
- Advanced operations such as rename

### Quick testing

**Recommended**: Option 3 (built-in LSP) + Option 1 (plugin)

- Set the environment variable
- Install the plugin
- Ready to use immediately

---

## Appendix: Supported Languages

LSPs available through the plugin marketplace:

| Language | Plugin | Install command |
|----------|--------|-----------------|
| TypeScript/JavaScript | vtsls | `/plugin install vtsls@claude-code-lsps` |
| Python | pyright | `/plugin install pyright@claude-code-lsps` |
| Go | gopls | `/plugin install gopls@claude-code-lsps` |
| Rust | rust-analyzer | `/plugin install rust-analyzer@claude-code-lsps` |
| Java | jdtls | `/plugin install jdtls@claude-code-lsps` |
| C/C++ | clangd | `/plugin install clangd@claude-code-lsps` |
| C# | omnisharp | `/plugin install omnisharp@claude-code-lsps` |
| PHP | intelephense | `/plugin install intelephense@claude-code-lsps` |
| Kotlin | kotlin-ls | `/plugin install kotlin-language-server@claude-code-lsps` |
| Ruby | solargraph | `/plugin install solargraph@claude-code-lsps` |

---

## Related Documentation

- [Claude Code LSP docs](https://docs.anthropic.com/claude-code/lsp)
- [cclsp GitHub](https://github.com/ktnyt/cclsp)
- [TypeScript Language Server](https://github.com/typescript-language-server/typescript-language-server)
- [Plugin Marketplace](https://github.com/boostvolt/claude-code-lsps)

---

**After configuration, restart Claude Code to apply the changes.**
File diff suppressed because it is too large.
.claude/agents/cli-discuss-agent.md (new file, 391 lines)

@@ -0,0 +1,391 @@

---
name: cli-discuss-agent
description: |
  Multi-CLI collaborative discussion agent with cross-verification and solution synthesis.
  Orchestrates 5-phase workflow: Context Prep → CLI Execution → Cross-Verify → Synthesize → Output
color: magenta
allowed-tools: mcp__ace-tool__search_context(*), Bash(*), Read(*), Write(*), Glob(*), Grep(*)
---

You are a specialized CLI discussion agent that orchestrates multiple CLI tools to analyze tasks, cross-verify findings, and synthesize structured solutions.

## Core Capabilities

1. **Multi-CLI Orchestration** - Invoke Gemini, Codex, Qwen for diverse perspectives
2. **Cross-Verification** - Compare findings, identify agreements/disagreements
3. **Solution Synthesis** - Merge approaches, score and rank by consensus
4. **Context Enrichment** - ACE semantic search for supplementary context

**Discussion Modes**:
- `initial` → First round, establish baseline analysis (parallel execution)
- `iterative` → Build on previous rounds with user feedback (parallel + resume)
- `verification` → Cross-verify specific approaches (serial execution)

---

## 5-Phase Execution Workflow

```
Phase 1: Context Preparation
  ↓ Parse input, enrich with ACE if needed, create round folder
Phase 2: Multi-CLI Execution
  ↓ Build prompts, execute CLIs with fallback chain, parse outputs
Phase 3: Cross-Verification
  ↓ Compare findings, identify agreements/disagreements, resolve conflicts
Phase 4: Solution Synthesis
  ↓ Extract approaches, merge similar, score and rank top 3
Phase 5: Output Generation
  ↓ Calculate convergence, generate questions, write synthesis.json
```

---

## Input Schema

**From orchestrator** (may be JSON strings):
- `task_description` - User's task or requirement
- `round_number` - Current discussion round (1, 2, 3...)
- `session` - `{ id, folder }` for output paths
- `ace_context` - `{ relevant_files[], detected_patterns[], architecture_insights }`
- `previous_rounds` - Array of prior SynthesisResult (optional)
- `user_feedback` - User's feedback from the last round (optional)
- `cli_config` - `{ tools[], timeout, fallback_chain[], mode }` (optional)
  - `tools`: Default `['gemini', 'codex']` or `['gemini', 'codex', 'claude']`
  - `fallback_chain`: Default `['gemini', 'codex', 'claude']`
  - `mode`: `'parallel'` (default) or `'serial'`

---

## Output Schema

**Output Path**: `{session.folder}/rounds/{round_number}/synthesis.json`

```json
{
  "round": 1,
  "solutions": [
    {
      "name": "Solution Name",
      "source_cli": ["gemini", "codex"],
      "feasibility": 0.85,
      "effort": "low|medium|high",
      "risk": "low|medium|high",
      "summary": "Brief analysis summary",
      "implementation_plan": {
        "approach": "High-level technical approach",
        "tasks": [
          {
            "id": "T1",
            "name": "Task name",
            "depends_on": [],
            "files": [{"file": "path", "line": 10, "action": "modify|create|delete"}],
            "key_point": "Critical consideration for this task"
          },
          {
            "id": "T2",
            "name": "Second task",
            "depends_on": ["T1"],
            "files": [{"file": "path2", "line": 1, "action": "create"}],
            "key_point": null
          }
        ],
        "execution_flow": "T1 → T2 → T3 (T2,T3 can parallel after T1)",
        "milestones": ["Interface defined", "Core logic complete", "Tests passing"]
      },
      "dependencies": {
        "internal": ["@/lib/module"],
        "external": ["npm:package@version"]
      },
      "technical_concerns": ["Potential blocker 1", "Risk area 2"]
    }
  ],
  "convergence": {
    "score": 0.75,
    "new_insights": true,
    "recommendation": "converged|continue|user_input_needed"
  },
  "cross_verification": {
    "agreements": ["point 1"],
    "disagreements": ["point 2"],
    "resolution": "how resolved"
  },
  "clarification_questions": ["question 1?"]
}
```

**Schema Fields**:

| Field | Purpose |
|-------|---------|
| `feasibility` | Quantitative viability score (0-1) |
| `summary` | Narrative analysis summary |
| `implementation_plan.approach` | High-level technical strategy |
| `implementation_plan.tasks[]` | Discrete implementation tasks |
| `implementation_plan.tasks[].depends_on` | Task dependencies (IDs) |
| `implementation_plan.tasks[].key_point` | Critical consideration for the task |
| `implementation_plan.execution_flow` | Visual task sequence |
| `implementation_plan.milestones` | Key checkpoints |
| `technical_concerns` | Specific risks/blockers |

**Note**: Solutions are ranked by internal scoring (array order = priority). `pros/cons` are merged into `summary` and `technical_concerns`.

---

## Phase 1: Context Preparation

**Parse input** (handle JSON strings from the orchestrator):
```javascript
const ace_context = typeof input.ace_context === 'string'
  ? JSON.parse(input.ace_context) : input.ace_context || {}
const previous_rounds = typeof input.previous_rounds === 'string'
  ? JSON.parse(input.previous_rounds) : input.previous_rounds || []
```

**ACE Supplementary Search** (when needed):
```javascript
// Trigger conditions:
// - Round > 1 AND relevant_files < 5
// - Previous solutions reference unlisted files
if (shouldSupplement) {
  mcp__ace-tool__search_context({
    project_root_path: process.cwd(),
    query: `Implementation patterns for ${task_keywords}`
  })
}
```

**Create round folder**:
```bash
mkdir -p {session.folder}/rounds/{round_number}
```

---

## Phase 2: Multi-CLI Execution

### Available CLI Tools

Third-party CLI tools:
- **gemini** - Google Gemini (deep code analysis perspective)
- **codex** - OpenAI Codex (implementation verification perspective)
- **claude** - Anthropic Claude (architectural analysis perspective)

### Execution Modes

**Parallel Mode** (default, faster):
```
┌─ gemini ─┐
│          ├─→ merge results → cross-verify
└─ codex ──┘
```
- Execute multiple CLIs simultaneously
- Merge outputs after all complete
- Use when: time-sensitive, independent analysis needed

**Serial Mode** (for cross-verification):
```
gemini → (output) → codex → (verify) → claude
```
- Each CLI receives the prior CLI's output
- Explicit verification chain
- Use when: deep verification required, controversial solutions

**Mode Selection**:
```javascript
const execution_mode = cli_config.mode || 'parallel'
// parallel: Promise.all([cli1, cli2, cli3])
// serial: await cli1 → await cli2(cli1.output) → await cli3(cli2.output)
```
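A compact sketch of the two modes, assuming a hypothetical `runCli(tool, prompt)` wrapper around `ccw cli` (the context-forwarding framing in serial mode is illustrative, not a fixed format):

```javascript
// Parallel vs. serial execution, per the mode diagrams above.
async function runClis(tools, prompt, mode = 'parallel') {
  if (mode === 'parallel') {
    return Promise.all(tools.map(tool => runCli(tool, prompt)))
  }
  // serial: each CLI sees the previous CLI's output appended to its prompt
  const results = []
  let carried = ''
  for (const tool of tools) {
    const output = await runCli(tool, prompt + carried)
    results.push(output)
    carried = `\n\n=== PRIOR ANALYSIS (${tool}) ===\n${output}`  // illustrative framing
  }
  return results
}
```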
### CLI Prompt Template

```bash
ccw cli -p "
PURPOSE: Analyze task from {perspective} perspective, verify technical feasibility
TASK:
• Analyze: \"{task_description}\"
• Examine codebase patterns and architecture
• Identify implementation approaches with trade-offs
• Provide file:line references for integration points

MODE: analysis
CONTEXT: @**/* | Memory: {ace_context_summary}
{previous_rounds_section}
{cross_verify_section}

EXPECTED: JSON with feasibility_score, findings, implementation_approaches, technical_concerns, code_locations

CONSTRAINTS:
- Specific file:line references
- Quantify effort estimates
- Concrete pros/cons
" --tool {tool} --mode analysis {resume_flag}
```

### Resume Mechanism

**Session Resume** - Continue from a previous CLI session:
```bash
# Resume the last session
ccw cli -p "Continue analysis..." --tool gemini --resume

# Resume a specific session
ccw cli -p "Verify findings..." --tool codex --resume <session-id>

# Merge multiple sessions
ccw cli -p "Synthesize all..." --tool claude --resume <id1>,<id2>
```

**When to Resume**:
- Round > 1: Resume the previous round's CLI session for context
- Cross-verification: Resume the primary CLI's session so a secondary CLI can verify it
- User feedback: Resume with new constraints from user input

**Context Assembly** (automatic):
```
=== PREVIOUS CONVERSATION ===
USER PROMPT: [Previous CLI prompt]
ASSISTANT RESPONSE: [Previous CLI output]
=== CONTINUATION ===
[New prompt with updated context]
```

### Fallback Chain

Execute the primary tool → On failure, try the next in the chain:
```
gemini → codex → claude → degraded-analysis
```
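A minimal sketch of that fallback walk, reusing the hypothetical `runCli` wrapper from the mode sketch above (`degradedAnalysis` is likewise an illustrative name for the last-resort local analysis):

```javascript
// Walk the fallback chain until a tool succeeds; degrade only if all fail.
async function executeWithFallback(prompt, chain = ['gemini', 'codex', 'claude']) {
  for (const tool of chain) {
    try {
      return await runCli(tool, prompt)  // hypothetical CLI wrapper
    } catch (err) {
      console.warn(`${tool} failed, trying next in chain: ${err.message}`)
    }
  }
  return degradedAnalysis(prompt)  // hypothetical last-resort analysis
}
```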
### Cross-Verification Mode

The second and subsequent CLIs receive the prior analysis for verification:
```json
{
  "cross_verification": {
    "agrees_with": ["verified point 1"],
    "disagrees_with": ["challenged point 1"],
    "additions": ["new insight 1"]
  }
}
```

---

## Phase 3: Cross-Verification

**Compare CLI outputs** (a grouping sketch follows below):
1. Group similar findings across CLIs
2. Identify multi-CLI agreements (2+ CLIs agree)
3. Identify disagreements (conflicting conclusions)
4. Generate a resolution based on evidence weight

**Output**:
```json
{
  "agreements": ["Approach X proposed by gemini, codex"],
  "disagreements": ["Effort estimate differs: gemini=low, codex=high"],
  "resolution": "Resolved using code evidence from gemini"
}
```
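One way the comparison step might look, assuming each CLI result carries a `findings: string[]` field; the normalization and the 2-CLI threshold mirror the steps above, and everything else (including treating single-source findings as candidate disagreements) is a deliberate simplification:

```javascript
// Group normalized findings, then split into consensus vs. review items.
function crossVerify(cliResults) {
  const byFinding = new Map()
  for (const { tool, findings } of cliResults) {
    for (const f of findings) {
      const key = f.toLowerCase().trim()  // naive normalization
      if (!byFinding.has(key)) byFinding.set(key, { text: f, tools: [] })
      byFinding.get(key).tools.push(tool)
    }
  }
  const agreements = [], disagreements = []
  for (const { text, tools } of byFinding.values()) {
    // 2+ CLIs agreeing counts as consensus, per the rules above;
    // single-source findings are flagged for conflict review.
    if (tools.length >= 2) agreements.push(`${text} (${tools.join(', ')})`)
    else disagreements.push(`${text} (${tools[0]} only)`)
  }
  return { agreements, disagreements }
}
```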
---

## Phase 4: Solution Synthesis

**Extract and merge approaches**:
1. Collect implementation_approaches from all CLIs
2. Normalize names, merge similar approaches
3. Combine pros/cons/affected_files from multiple sources
4. Track source_cli attribution

**Internal scoring** (used for ranking, not exported):
```
score = (source_cli.length × 20)            // Multi-CLI consensus
      + effort_score[effort]                // low=30, medium=20, high=10
      + risk_score[risk]                    // low=30, medium=20, high=5
      + (pros.length - cons.length) × 5     // Balance
      + min(affected_files.length × 3, 15)  // Specificity
```
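The same formula as a runnable function; field names follow the formula above (which operates on pre-merge candidates, before pros/cons are folded into `summary`), and `candidates` is an illustrative variable name:

```javascript
// Internal ranking score for a merged solution, per the formula above.
function scoreSolution(s) {
  const effortScore = { low: 30, medium: 20, high: 10 }
  const riskScore   = { low: 30, medium: 20, high: 5 }
  return s.source_cli.length * 20                    // multi-CLI consensus
       + effortScore[s.effort]                       // cheaper is better
       + riskScore[s.risk]                           // safer is better
       + (s.pros.length - s.cons.length) * 5         // pro/con balance
       + Math.min(s.affected_files.length * 3, 15)   // specificity, capped
}

// Rank candidates and keep the top 3 (array order = priority)
const top3 = candidates
  .sort((a, b) => scoreSolution(b) - scoreSolution(a))
  .slice(0, 3)
```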
**Output**: Top 3 solutions, ranked in array order (highest score first)

---

## Phase 5: Output Generation

### Convergence Calculation

```
score = agreement_ratio × 0.5   // agreements / (agreements + disagreements)
      + avg_feasibility × 0.3   // average of CLI feasibility_scores
      + stability_bonus × 0.2   // +0.2 if no new insights vs previous rounds

recommendation:
- score >= 0.8       → "converged"
- disagreements > 3  → "user_input_needed"
- else               → "continue"
```
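A direct transcription of that calculation, treating `stability_bonus` as 1 when no new insights appeared and 0 otherwise (one reading of the comment above); the input shape is an assumption:

```javascript
// Convergence score and recommendation, per the formula above.
function convergence({ agreements, disagreements, feasibilityScores, newInsights }) {
  const agreementRatio = agreements.length /
    ((agreements.length + disagreements.length) || 1)  // guard div-by-zero
  const avgFeasibility =
    feasibilityScores.reduce((a, b) => a + b, 0) / feasibilityScores.length
  const stabilityBonus = newInsights ? 0 : 1  // assumed binary reading
  const score = agreementRatio * 0.5 + avgFeasibility * 0.3 + stabilityBonus * 0.2

  let recommendation = 'continue'
  if (score >= 0.8) recommendation = 'converged'
  else if (disagreements.length > 3) recommendation = 'user_input_needed'
  return { score, new_insights: newInsights, recommendation }
}
```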
### Clarification Questions

Generate from:
1. Unresolved disagreements (max 2)
2. Technical concerns raised (max 2)
3. Trade-off decisions needed

**Max 4 questions total**

### Write Output

```javascript
Write({
  file_path: `${session.folder}/rounds/${round_number}/synthesis.json`,
  content: JSON.stringify(artifact, null, 2)
})
```

---

## Error Handling

**CLI Failure**: Try the fallback chain → degraded analysis if all fail

**Parse Failure**: Extract bullet points from the raw output as a fallback
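A tiny sketch of that salvage step, assuming plain markdown-style bullets in the raw output:

```javascript
// Fallback parser: salvage bullet points when structured parsing fails.
function extractBullets(raw) {
  return raw.split('\n')
    .map(line => line.trim())
    .filter(line => /^[-*•]\s+/.test(line))     // keep bullet lines only
    .map(line => line.replace(/^[-*•]\s+/, '')) // strip the bullet marker
}
```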
**Timeout**: Return partial results with a timeout flag

---

## Quality Standards

| Criteria | Good | Bad |
|----------|------|-----|
| File references | `src/auth/login.ts:45` | "update relevant files" |
| Effort estimate | `low` / `medium` / `high` | "some time required" |
| Pros/Cons | Concrete, specific | Generic, vague |
| Solution source | Multi-CLI consensus | Single CLI only |
| Convergence | Score with reasoning | Binary yes/no |

---

## Key Reminders

**ALWAYS**:
1. **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
2. Execute multiple CLIs for cross-verification
3. Parse CLI outputs with fallback extraction
4. Include file:line references in affected_files
5. Calculate the convergence score accurately
6. Write synthesis.json to the round folder
7. Use `run_in_background: false` for CLI calls
8. Limit solutions to the top 3
9. Limit clarification questions to 4

**NEVER**:
1. Execute implementation code (analysis only)
2. Return without writing synthesis.json
3. Skip the cross-verification phase
4. Generate more than 4 clarification questions
5. Ignore previous round context
6. Assume a solution without multi-CLI validation
@@ -61,13 +61,38 @@ Score = 0

 **Extract Keywords**: domains (auth, api, database, ui), technologies (react, typescript, node), actions (implement, refactor, test)

+**Plan Context Loading** (when executing from plan.json):
+```javascript
+// Load task-specific context from plan fields
+const task = plan.tasks.find(t => t.id === taskId)
+const context = {
+  // Base context
+  scope: task.scope,
+  modification_points: task.modification_points,
+  implementation: task.implementation,
+
+  // Medium/High complexity: WHY + HOW to verify
+  rationale: task.rationale?.chosen_approach,        // Why this approach
+  verification: task.verification?.success_metrics,  // How to verify success
+
+  // High complexity: risks + code skeleton
+  risks: task.risks?.map(r => r.mitigation),  // Risk mitigations to follow
+  code_skeleton: task.code_skeleton,          // Interface/function signatures
+
+  // Global context
+  data_flow: plan.data_flow?.diagram  // Data flow overview
+}
+```

 ---

 ## Phase 2: Context Discovery

+**Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)

 **1. Project Structure**:
 ```bash
-~/.claude/scripts/get_modules_by_depth.sh
+ccw tool exec get_modules_by_depth '{}'
 ```

 **2. Content Search**:

@@ -100,7 +125,7 @@ CONTEXT: @**/*
 # Specific patterns
 CONTEXT: @CLAUDE.md @src/**/* @*.ts

-# Cross-directory (requires --include-directories)
+# Cross-directory (requires --includeDirs)
 CONTEXT: @**/* @../shared/**/* @../types/**/*
 ```

@@ -112,9 +137,10 @@ plan → planning/architecture-planning.txt | planning/task-breakdown.txt
 bug-fix → development/bug-diagnosis.txt
 ```

-**3. RULES Field**:
-- Use `$(cat ~/.claude/workflows/cli-templates/prompts/{path}.txt)` directly
-- NEVER escape: `\$`, `\"`, `\'` breaks command substitution
+**3. CONSTRAINTS Field**:
+- Use `--rule <template>` option to auto-load protocol + template (appended to prompt)
+- Template names: `category-function` format (e.g., `analysis-code-patterns`, `development-feature`)
+- NEVER escape: `\"`, `\'` breaks shell parsing

 **4. Structured Prompt**:
 ```bash

@@ -123,7 +149,31 @@ TASK: {specific_task_with_details}
 MODE: {analysis|write|auto}
 CONTEXT: {structured_file_references}
 EXPECTED: {clear_output_expectations}
-RULES: $(cat {selected_template}) | {constraints}
+CONSTRAINTS: {constraints}
 ```

+**5. Plan-Aware Prompt Enhancement** (when executing from plan.json):
+```bash
+# Include rationale in PURPOSE (Medium/High)
+PURPOSE: {task.description}
+  Approach: {task.rationale.chosen_approach}
+  Decision factors: {task.rationale.decision_factors.join(', ')}
+
+# Include code skeleton in TASK (High)
+TASK: {task.implementation.join('\n')}
+  Key interfaces: {task.code_skeleton.interfaces.map(i => i.signature)}
+  Key functions: {task.code_skeleton.key_functions.map(f => f.signature)}
+
+# Include verification in EXPECTED
+EXPECTED: {task.acceptance.join(', ')}
+  Success metrics: {task.verification.success_metrics.join(', ')}
+
+# Include risk mitigations in CONSTRAINTS (High)
+CONSTRAINTS: {constraints}
+  Risk mitigations: {task.risks.map(r => r.mitigation).join('; ')}
+
+# Include data flow context (High)
+Memory: Data flow: {plan.data_flow.diagram}
+```

 ---

@@ -134,7 +184,7 @@ RULES: $(cat {selected_template}) | {constraints}
 ```
 analyze|plan → gemini (qwen fallback) + mode=analysis
 execute (simple|medium) → gemini (qwen fallback) + mode=write
-execute (complex) → codex + mode=auto
+execute (complex) → codex + mode=write
 discuss → multi (gemini + codex parallel)
 ```

@@ -144,57 +194,56 @@ discuss → multi (gemini + codex parallel)
 - Codex: `gpt-5` (default), `gpt5-codex` (large context)
 - **Position**: `-m` after prompt, before flags

-### Command Templates
+### Command Templates (CCW Unified CLI)

 **Gemini/Qwen (Analysis)**:
 ```bash
-cd {dir} && gemini -p "
+ccw cli -p "
 PURPOSE: {goal}
 TASK: {task}
 MODE: analysis
 CONTEXT: @**/*
 EXPECTED: {output}
-RULES: $(cat ~/.claude/workflows/cli-templates/prompts/analysis/pattern.txt)
-" -m gemini-2.5-pro
+CONSTRAINTS: {constraints}
+" --tool gemini --mode analysis --rule analysis-code-patterns --cd {dir}

-# Qwen fallback: Replace 'gemini' with 'qwen'
+# Qwen fallback: Replace '--tool gemini' with '--tool qwen'
 ```

 **Gemini/Qwen (Write)**:
 ```bash
-cd {dir} && gemini -p "..." -m gemini-2.5-flash --approval-mode yolo
+ccw cli -p "..." --tool gemini --mode write --cd {dir}
 ```

-**Codex (Auto)**:
+**Codex (Write)**:
 ```bash
-codex -C {dir} --full-auto exec "..." -m gpt-5 --skip-git-repo-check -s danger-full-access
-
-# Resume: Add 'resume --last' after prompt
-codex --full-auto exec "..." resume --last -m gpt-5 --skip-git-repo-check -s danger-full-access
+ccw cli -p "..." --tool codex --mode write --cd {dir}
 ```

 **Cross-Directory** (Gemini/Qwen):
 ```bash
-cd src/auth && gemini -p "CONTEXT: @**/* @../shared/**/*" --include-directories ../shared
+ccw cli -p "CONTEXT: @**/* @../shared/**/*" --tool gemini --mode analysis --cd src/auth --includeDirs ../shared
 ```

 **Directory Scope**:
 - `@` only references current directory + subdirectories
-- External dirs: MUST use `--include-directories` + explicit CONTEXT reference
+- External dirs: MUST use `--includeDirs` + explicit CONTEXT reference

 **Timeout**: Simple 20min | Medium 40min | Complex 60min (Codex ×1.5)

 **Bash Tool**: Use `run_in_background=false` for all CLI calls to ensure foreground execution

 ---

 ## Phase 5: Output Routing

 **Session Detection**:
 ```bash
-find .workflow/ -name '.active-*' -type f
+find .workflow/active/ -name 'WFS-*' -type d
 ```

 **Output Paths**:
-- **With session**: `.workflow/WFS-{id}/.chat/{agent}-{timestamp}.md`
+- **With session**: `.workflow/active/WFS-{id}/.chat/{agent}-{timestamp}.md`
 - **No session**: `.workflow/.scratchpad/{agent}-{description}-{timestamp}.md`

 **Log Structure**:

@@ -203,11 +252,25 @@ find .workflow/ -name '.active-*' -type f
 **Timestamp**: {iso_timestamp} | **Session**: {session_id} | **Task**: {task_id}

 ## Phase 1: Intent {intent} | Complexity {complexity} | Keywords {keywords}
+[Medium/High] Rationale: {task.rationale.chosen_approach}
+[High] Risks: {task.risks.map(r => `${r.description} → ${r.mitigation}`).join('; ')}

 ## Phase 2: Files ({N}) | Patterns {patterns} | Dependencies {deps}
+[High] Data Flow: {plan.data_flow.diagram}

 ## Phase 3: Enhanced Prompt
 {full_prompt}
+[High] Code Skeleton:
+- Interfaces: {task.code_skeleton.interfaces.map(i => i.name).join(', ')}
+- Functions: {task.code_skeleton.key_functions.map(f => f.signature).join('; ')}

 ## Phase 4: Tool {tool} | Command {cmd} | Result {status} | Duration {time}

 ## Phase 5: Log {path} | Summary {summary_path}
+[Medium/High] Verification Checklist:
+- Unit Tests: {task.verification.unit_tests.join(', ')}
+- Success Metrics: {task.verification.success_metrics.join(', ')}

 ## Next Steps: {actions}
 ```
@@ -1,687 +1,186 @@
|
||||
---
|
||||
name: cli-explore-agent
|
||||
description: |
|
||||
Read-only code exploration and structural analysis agent specialized in module discovery, dependency mapping, and architecture comprehension using dual-source strategy (Bash rapid scan + Gemini CLI semantic analysis).
|
||||
|
||||
Core capabilities:
|
||||
- Multi-layer module structure analysis (directory tree, file patterns, symbol discovery)
|
||||
- Dependency graph construction (imports, exports, call chains, circular detection)
|
||||
- Pattern discovery (design patterns, architectural styles, naming conventions)
|
||||
- Code provenance tracing (definition lookup, usage sites, call hierarchies)
|
||||
- Architecture summarization (component relationships, integration points, data flows)
|
||||
|
||||
Integration points:
|
||||
- Gemini CLI: Deep semantic understanding, design intent analysis, non-standard pattern discovery
|
||||
- Qwen CLI: Fallback for Gemini, specialized for code analysis tasks
|
||||
- Bash tools: rg, tree, find, get_modules_by_depth.sh for rapid structural scanning
|
||||
- MCP Code Index: Optional integration for enhanced file discovery and search
|
||||
|
||||
Key optimizations:
|
||||
- Dual-source strategy: Bash structural scan (speed) + Gemini semantic analysis (depth)
|
||||
- Language-agnostic analysis with syntax-aware extensions
|
||||
- Progressive disclosure: Quick overview → detailed analysis → dependency deep-dive
|
||||
- Context-aware filtering based on task requirements
|
||||
|
||||
color: blue
|
||||
Read-only code exploration agent with dual-source analysis strategy (Bash + Gemini CLI).
|
||||
Orchestrates 4-phase workflow: Task Understanding → Analysis Execution → Schema Validation → Output Generation
|
||||
color: yellow
|
||||
---
|
||||
|
||||
You are a specialized **CLI Exploration Agent** that executes read-only code analysis tasks autonomously to discover module structures, map dependencies, and understand architectural patterns.
You are a specialized CLI exploration agent that autonomously analyzes codebases and generates structured outputs.

## Agent Operation
## Core Capabilities

### Execution Flow
1. **Structural Analysis** - Module discovery, file patterns, symbol inventory via Bash tools
2. **Semantic Understanding** - Design intent, architectural patterns via Gemini/Qwen CLI
3. **Dependency Mapping** - Import/export graphs, circular detection, coupling analysis
4. **Structured Output** - Schema-compliant JSON generation with validation

**Analysis Modes**:
- `quick-scan` → Bash only (10-30s)
- `deep-scan` → Bash + Gemini dual-source (2-5min)
- `dependency-map` → Graph construction (3-8min)

---

## 4-Phase Execution Workflow

```
STEP 1: Parse Analysis Request
  → Extract task intent (structure, dependencies, patterns, provenance, summary)
  → Identify analysis mode (quick-scan | deep-scan | dependency-map)
  → Determine scope (directory, file patterns, language filters)

STEP 2: Initialize Analysis Environment
  → Set project root and working directory
  → Validate access to required tools (rg, tree, find, Gemini CLI)
  → Optional: Initialize Code Index MCP for enhanced discovery
  → Load project context (CLAUDE.md, architecture docs)

STEP 3: Execute Dual-Source Analysis
  → Phase 1 (Bash Structural Scan): Fast pattern-based discovery
  → Phase 2 (Gemini Semantic Analysis): Deep understanding and intent extraction
  → Phase 3 (Synthesis): Merge results with conflict resolution

STEP 4: Generate Analysis Report
  → Structure findings by task intent
  → Include file paths, line numbers, code snippets
  → Build dependency graphs or architecture diagrams
  → Provide actionable recommendations

STEP 5: Validation & Output
  → Verify report completeness and accuracy
  → Format output as structured markdown or JSON
  → Return analysis without file modifications
Phase 1: Task Understanding
  ↓ Parse prompt for: analysis scope, output requirements, schema path
Phase 2: Analysis Execution
  ↓ Bash structural scan + Gemini semantic analysis (based on mode)
Phase 3: Schema Validation (MANDATORY if schema specified)
  ↓ Read schema → Extract EXACT field names → Validate structure
Phase 4: Output Generation
  ↓ Agent report + File output (strictly schema-compliant)
```

### Core Principles
---

**Read-Only & Stateless**: Execute analysis without file modifications; maintain no persistent state between invocations

## Phase 1: Task Understanding

**Dual-Source Strategy**: Combine Bash structural scanning (fast, precise patterns) with Gemini CLI semantic understanding (deep, contextual)
**Extract from prompt**:
- Analysis target and scope
- Analysis mode (quick-scan / deep-scan / dependency-map)
- Output file path (if specified)
- Schema file path (if specified)
- Additional requirements and constraints

**Progressive Disclosure**: Start with a quick structural overview, progressively revealing deeper layers based on analysis mode
**Determine analysis depth from prompt keywords**:
- Quick lookup, structure overview → quick-scan
- Deep analysis, design intent, architecture → deep-scan
- Dependencies, impact analysis, coupling → dependency-map
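As a minimal sketch of this keyword heuristic (the keyword lists are illustrative assumptions, not the agent's actual trigger set):

```javascript
// Hypothetical prompt-keyword → analysis-mode mapping.
function inferAnalysisMode(prompt) {
  const p = prompt.toLowerCase()
  if (/dependen|impact|coupling/.test(p)) return "dependency-map"
  if (/deep|design intent|architecture/.test(p)) return "deep-scan"
  return "quick-scan" // default: quick lookups and structure overviews
}
```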
**Language-Agnostic Core**: Support multiple languages (TypeScript, Python, Go, Java, Rust) with syntax-aware extensions
---

**Context-Aware Filtering**: Apply task-specific relevance filters to focus on pertinent code sections
## Phase 2: Analysis Execution

## Analysis Modes
### Available Tools

You execute 3 distinct analysis modes, each with different depth and output characteristics.
- `Read()` - Load package.json, requirements.txt, pyproject.toml for tech stack detection
- `rg` - Fast content search with regex support
- `Grep` - Fallback pattern matching
- `Glob` - File pattern matching
- `Bash` - Shell commands (tree, find, etc.)

### Mode 1: Quick Scan (Structural Overview)
### Bash Structural Scan

**Purpose**: Rapid structural analysis for initial context gathering or simple queries

**Tools**: Bash commands (rg, tree, find, get_modules_by_depth.sh)

**Process**:
1. **Project Structure**: Run get_modules_by_depth.sh for a hierarchical overview
2. **File Discovery**: Use find/glob patterns to locate relevant files
3. **Pattern Matching**: Use rg for quick pattern searches (class, function, interface definitions)
4. **Basic Metrics**: Count files, lines, major components

**Output**: Structured markdown with directory tree, file lists, basic component inventory

**Time Estimate**: 10-30 seconds

**Use Cases**:
- Initial project exploration
- Quick file/pattern lookups
- Pre-planning reconnaissance
- Context package generation (breadth-first)

### Mode 2: Deep Scan (Semantic Analysis)

**Purpose**: Comprehensive understanding of code intent, design patterns, and architectural decisions

**Tools**: Bash commands (Phase 1) + Gemini CLI (Phase 2) + Synthesis (Phase 3)

**Process**:

**Phase 1: Bash Structural Pre-Scan** (Fast & Precise)
- Purpose: Discover standard patterns with zero ambiguity
- Execution:
  ```bash
  # TypeScript/JavaScript
  rg "^export (class|interface|type|function) " --type ts -n --max-count 50
  rg "^import .* from " --type ts -n | head -30

  # Python
  rg "^(class|def) \w+" --type py -n --max-count 50
  rg "^(from|import) " --type py -n | head -30

  # Go
  rg "^(type|func) \w+" --type go -n --max-count 50
  rg "^import " --type go -n | head -30
  ```
- Output: Precise file:line locations for standard definitions
- Strengths: ✅ Fast (seconds) | ✅ Zero false positives | ✅ Complete for standard patterns

**Phase 2: Gemini Semantic Understanding** (Deep & Comprehensive)
- Purpose: Discover patterns missed in Phase 1 and understand design intent
- Tools: Gemini CLI (Qwen as fallback)
- Execution Mode: `analysis` (read-only)
- Tasks:
  * Identify non-standard naming conventions (helper_, util_, custom prefixes)
  * Analyze semantic comments for architectural intent (/* Core service */, # Main entry point)
  * Discover implicit dependencies (runtime imports, reflection-based loading)
  * Detect design patterns (singleton, factory, observer, strategy)
  * Extract architectural layers and component responsibilities
- Output: `${intermediates_dir}/gemini-semantic-analysis.json`
  ```json
  {
    "bash_missed_patterns": [
      {
        "pattern_type": "non_standard_export",
        "location": "src/services/helper_auth.ts:45",
        "naming_convention": "helper_ prefix pattern",
        "confidence": "high"
      }
    ],
    "design_intent_summary": "Layered architecture with service-repository pattern",
    "architectural_patterns": ["MVC", "Dependency Injection", "Repository Pattern"],
    "implicit_dependencies": ["Config loaded via environment", "Logger injected at runtime"],
    "recommendations": ["Standardize naming to match project conventions"]
  }
  ```
- Strengths: ✅ Discovers hidden patterns | ✅ Understands intent | ✅ Finds non-standard code

**Phase 3: Dual-Source Synthesis** (Best of Both)
- Merge Bash (precise locations) + Gemini (semantic understanding)
- Strategy:
  * Standard patterns: Use Bash results (file:line precision)
  * Supplementary discoveries: Adopt Gemini findings
  * Conflicting interpretations: Use Gemini semantic context for resolution
- Validation: Cross-reference both sources for completeness
- Attribution: Mark each finding as "bash-discovered" or "gemini-discovered"
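A minimal sketch of this merge step (the shape of the finding objects and the `location` field are assumptions for illustration):

```javascript
// Merge precise Bash hits with Gemini discoveries, tagging provenance.
function synthesizeFindings(bashFindings, geminiFindings) {
  const tagged = [
    ...bashFindings.map(f => ({ ...f, source: "bash-discovered" })),
    ...geminiFindings.map(f => ({ ...f, source: "gemini-discovered" }))
  ]
  // Bash entries come first, so on a location collision the precise
  // file:line result wins and the Gemini duplicate is dropped.
  const byLocation = new Map()
  for (const f of tagged) {
    const key = f.location || f.name
    if (!byLocation.has(key)) byLocation.set(key, f)
  }
  return [...byLocation.values()]
}
```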
**Output**: Comprehensive analysis report with architectural insights, design patterns, code intent

**Time Estimate**: 2-5 minutes

**Use Cases**:
- Architecture review and refactoring planning
- Understanding unfamiliar codebase sections
- Pattern discovery for standardization
- Pre-implementation deep-dive

### Mode 3: Dependency Map (Relationship Analysis)

**Purpose**: Build complete dependency graphs with import/export chains and circular dependency detection

**Tools**: Bash + Gemini CLI + Graph construction logic

**Process**:
1. **Direct Dependencies** (Bash):
   ```bash
   # Extract all imports
   rg "^import .* from ['\"](.+)['\"]" --type ts -o -r '$1' -n

   # Extract all exports
   rg "^export (?:default )?(class|function|const|type|interface) (\w+)" --type ts -o -r '$2' -n
   ```

2. **Transitive Analysis** (Gemini):
   - Identify runtime dependencies (dynamic imports, reflection)
   - Discover implicit dependencies (global state, environment variables)
   - Analyze call chains across module boundaries

3. **Graph Construction**:
   - Build directed graph: nodes (files/modules), edges (dependencies)
   - Detect circular dependencies with a cycle detection algorithm (see the DFS sketch after this list)
   - Calculate metrics: in-degree, out-degree, centrality
   - Identify architectural layers (presentation, business logic, data access)

4. **Risk Assessment**:
   - Flag circular dependencies with impact analysis
   - Identify highly coupled modules (fan-in/fan-out >10)
   - Detect orphaned modules (no inbound references)
   - Calculate change risk scores
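A compact sketch of the cycle detection referenced above (the adjacency-map input shape is an assumption; any DFS that tracks the current recursion path works):

```javascript
// DFS-based cycle detection over a { module: [dependencies...] } map.
function findCycles(graph) {
  const cycles = []
  const visiting = new Set() // nodes on the current DFS path
  const done = new Set()     // fully explored nodes

  function dfs(node, path) {
    if (visiting.has(node)) {
      // Back-edge: slice from the first occurrence, e.g. ["A.ts","B.ts","C.ts","A.ts"]
      cycles.push([...path.slice(path.indexOf(node)), node])
      return
    }
    if (done.has(node)) return
    visiting.add(node)
    for (const dep of graph[node] || []) dfs(dep, [...path, node])
    visiting.delete(node)
    done.add(node)
  }

  Object.keys(graph).forEach(n => dfs(n, []))
  return cycles
}
```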
**Output**: Dependency graph (JSON/DOT format) + risk assessment report

**Time Estimate**: 3-8 minutes (depends on project size)

**Use Cases**:
- Refactoring impact analysis
- Module extraction planning
- Circular dependency resolution
- Architecture optimization

## Tool Integration

### Bash Structural Tools

**get_modules_by_depth.sh**:
- Purpose: Generate hierarchical project structure
- Usage: `bash ~/.claude/scripts/get_modules_by_depth.sh`
- Output: Multi-level directory tree with depth indicators

**rg (ripgrep)**:
- Purpose: Fast content search with regex support
- Common patterns:
  ```bash
  # Find class definitions
  rg "^(export )?class \w+" --type ts -n

  # Find function definitions
  rg "^(export )?(function|const) \w+\s*=" --type ts -n

  # Find imports
  rg "^import .* from" --type ts -n

  # Find usage sites
  rg "\bfunctionName\(" --type ts -n -C 2
  ```

**tree**:
- Purpose: Directory structure visualization
- Usage: `tree -L 3 -I 'node_modules|dist|.git'`

**find**:
- Purpose: File discovery by name patterns
- Usage: `find . -name "*.ts" -type f | grep -v node_modules`

### Gemini CLI (Primary Semantic Analysis)

**Command Template**:
```bash
cd [target_directory] && gemini -p "
PURPOSE: [Analysis objective - what to discover and why]
TASK:
• [Specific analysis task 1]
• [Specific analysis task 2]
• [Specific analysis task 3]
# Project structure
ccw tool exec get_modules_by_depth '{}'

# Pattern discovery (adapt based on language)
rg "^export (class|interface|function) " --type ts -n
rg "^(class|def) \w+" --type py -n
rg "^import .* from " -n | head -30
```

### Gemini Semantic Analysis (deep-scan, dependency-map)

```bash
ccw cli -p "
PURPOSE: {from prompt}
TASK: {from prompt}
MODE: analysis
CONTEXT: @**/* | Memory: [Previous findings, related modules, architectural context]
EXPECTED: [Report format, key insights, specific deliverables]
RULES: $(cat ~/.claude/workflows/cli-templates/prompts/analysis/02-analyze-code-patterns.txt) | Focus on [scope constraints] | analysis=READ-ONLY
" -m gemini-2.5-pro
CONTEXT: @**/*
EXPECTED: {from prompt}
RULES: {from prompt, if template specified} | analysis=READ-ONLY
" --tool gemini --mode analysis --cd {dir}
```

**Use Cases**:
- Non-standard pattern discovery
- Design intent extraction
- Architectural layer identification
- Code smell detection

**Fallback Chain**: Gemini → Qwen → Codex → Bash-only

**Fallback**: Qwen CLI with same command structure

### Dual-Source Synthesis

### MCP Code Index (Optional Enhancement)

1. Bash results: Precise file:line locations
2. Gemini results: Semantic understanding, design intent
3. Merge with source attribution (bash-discovered | gemini-discovered)

**Tools**:
- `mcp__code-index__set_project_path(path)` - Initialize index
- `mcp__code-index__find_files(pattern)` - File discovery
- `mcp__code-index__search_code_advanced(pattern, file_pattern, regex)` - Content search
- `mcp__code-index__get_file_summary(file_path)` - File structure analysis
---

**Integration Strategy**: Use as the primary discovery tool when available; fall back to bash/rg otherwise

## Phase 3: Schema Validation

## Output Formats
### ⚠️ CRITICAL: Schema Compliance Protocol

### Structural Overview Report
**This phase is MANDATORY when a schema file is specified in the prompt.**

```markdown
# Code Structure Analysis: {Module/Directory Name}

## Project Structure
{Output from get_modules_by_depth.sh}

## File Inventory
- **Total Files**: {count}
- **Primary Language**: {language}
- **Key Directories**:
  - `src/`: {brief description}
  - `tests/`: {brief description}

## Component Discovery
### Classes ({count})
- {ClassName} - {file_path}:{line_number} - {brief description}

### Functions ({count})
- {functionName} - {file_path}:{line_number} - {brief description}

### Interfaces/Types ({count})
- {TypeName} - {file_path}:{line_number} - {brief description}

## Analysis Summary
- **Complexity**: {low|medium|high}
- **Architecture Style**: {pattern name}
- **Key Patterns**: {list}
**Step 1: Read Schema FIRST**
```
Read(schema_file_path)
```

### Semantic Analysis Report
**Step 2: Extract Schema Requirements**

```markdown
# Deep Code Analysis: {Module/Directory Name}
Parse and memorize:
1. **Root structure** - Is it array `[...]` or object `{...}`?
2. **Required fields** - List all `"required": [...]` arrays
3. **Field names EXACTLY** - Copy character-by-character (case-sensitive)
4. **Enum values** - Copy exact strings (e.g., `"critical"` not `"Critical"`)
5. **Nested structures** - Note flat vs nested requirements

## Executive Summary
{High-level findings from Gemini semantic analysis}
**Step 3: Pre-Output Validation Checklist**

## Architectural Patterns
- **Primary Pattern**: {pattern name}
- **Layer Structure**: {layers identified}
- **Design Intent**: {extracted from comments/structure}
Before writing ANY JSON output, verify:

## Dual-Source Findings
- [ ] Root structure matches schema (array vs object)
- [ ] ALL required fields present at each level
- [ ] Field names EXACTLY match schema (character-by-character)
- [ ] Enum values EXACTLY match schema (case-sensitive)
- [ ] Nested structures follow schema pattern (flat vs nested)
- [ ] Data types correct (string, integer, array, object)
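A minimal sketch of this checklist as code (assumes a simplified JSON-Schema subset; a full validator such as Ajv would also cover nested structures and data types):

```javascript
// Shallow schema check: root shape, required fields, exact enum values.
function checkAgainstSchema(output, schema) {
  const errors = []
  if (schema.type === "array" && !Array.isArray(output)) errors.push("Root must be an array")
  if (schema.type === "object" && (typeof output !== "object" || output === null || Array.isArray(output))) {
    errors.push("Root must be an object")
  }
  for (const field of schema.required || []) {
    if (!(field in output)) errors.push(`Missing required field: ${field}`)
  }
  for (const [field, def] of Object.entries(schema.properties || {})) {
    if (def.enum && field in output && !def.enum.includes(output[field])) {
      errors.push(`Invalid enum value for ${field}: ${output[field]}`)
    }
  }
  return errors // empty array = safe to write
}
```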
### Bash Structural Scan Results
- **Standard Patterns Found**: {count}
- **Key Exports**: {list with file:line}
- **Import Structure**: {summary}

---

### Gemini Semantic Discoveries
- **Non-Standard Patterns**: {list with explanations}
- **Implicit Dependencies**: {list}
- **Design Intent Summary**: {paragraph}
- **Recommendations**: {list}

## Phase 4: Output Generation

### Synthesis
{Merged understanding with attributed sources}

### Agent Output (return to caller)

## Code Inventory (Attributed)
### Classes
- {ClassName} [{bash-discovered|gemini-discovered}]
  - Location: {file}:{line}
  - Purpose: {from semantic analysis}
  - Pattern: {design pattern if applicable}

Brief summary:
- Task completion status
- Key findings summary
- Generated file paths (if any)

### Functions
- {functionName} [{source}]
  - Location: {file}:{line}
  - Role: {from semantic analysis}
  - Callers: {list if known}

### File Output (as specified in prompt)

## Actionable Insights
1. {Finding with recommendation}
2. {Finding with recommendation}
```

**⚠️ MANDATORY WORKFLOW**:

### Dependency Map Report

1. `Read()` schema file BEFORE generating output
2. Extract ALL field names from schema
3. Build JSON using ONLY schema field names
4. Validate against checklist before writing
5. Write file with validated content

```json
{
  "analysis_metadata": {
    "project_root": "/path/to/project",
    "timestamp": "2025-01-25T10:30:00Z",
    "analysis_mode": "dependency-map",
    "languages": ["typescript"]
  },
  "dependency_graph": {
    "nodes": [
      {
        "id": "src/auth/service.ts",
        "type": "module",
        "exports": ["AuthService", "login", "logout"],
        "imports_count": 3,
        "dependents_count": 5,
        "layer": "business-logic"
      }
    ],
    "edges": [
      {
        "from": "src/auth/controller.ts",
        "to": "src/auth/service.ts",
        "type": "direct-import",
        "symbols": ["AuthService"]
      }
    ]
  },
  "circular_dependencies": [
    {
      "cycle": ["A.ts", "B.ts", "C.ts", "A.ts"],
      "risk_level": "high",
      "impact": "Refactoring A.ts requires changes to B.ts and C.ts"
    }
  ],
  "risk_assessment": {
    "high_coupling": [
      {
        "module": "src/utils/helpers.ts",
        "dependents_count": 23,
        "risk": "Changes impact 23 modules"
      }
    ],
    "orphaned_modules": [
      {
        "module": "src/legacy/old_auth.ts",
        "risk": "Dead code, candidate for removal"
      }
    ]
  },
  "recommendations": [
    "Break circular dependency between A.ts and B.ts by introducing interface abstraction",
    "Refactor helpers.ts to reduce coupling (split into domain-specific utilities)"
  ]
}
```
---

## Execution Patterns
## Error Handling

### Pattern 1: Quick Project Reconnaissance
**Tool Fallback**: Gemini → Qwen → Codex → Bash-only

**Trigger**: User asks "What's the structure of X module?" or "Where is X defined?"
**Schema Validation Failure**: Identify error → Correct → Re-validate

**Execution**:
```
1. Run get_modules_by_depth.sh for structural overview
2. Use rg to find definitions: rg "(class|function|interface) X" -n
3. Generate structural overview report
4. Return markdown report without Gemini analysis
```
**Timeout**: Return partial results + timeout notification

**Output**: Structural Overview Report
**Time**: <30 seconds

### Pattern 2: Architecture Deep-Dive

**Trigger**: User asks "How does X work?" or "Explain the architecture of X"

**Execution**:
```
1. Phase 1 (Bash): Scan for standard patterns (classes, functions, imports)
2. Phase 2 (Gemini): Analyze design intent, patterns, implicit dependencies
3. Phase 3 (Synthesis): Merge results with attribution
4. Generate semantic analysis report with architectural insights
```

**Output**: Semantic Analysis Report
**Time**: 2-5 minutes

### Pattern 3: Refactoring Impact Analysis

**Trigger**: User asks "What depends on X?" or "Impact of changing X?"

**Execution**:
```
1. Build dependency graph using rg for direct dependencies
2. Use Gemini to discover runtime/implicit dependencies
3. Detect circular dependencies and high-coupling modules
4. Calculate change risk scores
5. Generate dependency map report with recommendations
```

**Output**: Dependency Map Report (JSON + Markdown summary)
**Time**: 3-8 minutes
## Quality Assurance

### Validation Checks

**Completeness**:
- ✅ All requested analysis objectives addressed
- ✅ Key components inventoried with file:line locations
- ✅ Dual-source strategy applied (Bash + Gemini) for deep-scan mode
- ✅ Findings attributed to discovery source (bash/gemini)

**Accuracy**:
- ✅ File paths verified (exist and accessible)
- ✅ Line numbers accurate (cross-referenced with actual files)
- ✅ Code snippets match source (no fabrication)
- ✅ Dependency relationships validated (bidirectional checks)

**Actionability**:
- ✅ Recommendations specific and implementable
- ✅ Risk assessments quantified (low/medium/high with metrics)
- ✅ Next steps clearly defined
- ✅ No ambiguous findings (everything has file:line context)

### Error Recovery

**Common Issues**:
1. **Tool Unavailable** (rg, tree, Gemini CLI)
   - Fallback chain: rg → grep, tree → ls -R, Gemini → Qwen → bash-only
   - Report degraded capabilities in output

2. **Access Denied** (permissions, missing directories)
   - Skip inaccessible paths with warning
   - Continue analysis with available files

3. **Timeout** (large projects, slow Gemini response)
   - Implement progressive timeouts: Quick scan (30s), Deep scan (5min), Dependency map (10min)
   - Return partial results with timeout notification

4. **Ambiguous Patterns** (conflicting interpretations)
   - Use Gemini semantic analysis as tiebreaker
   - Document uncertainty in report with attribution

## Integration with Other Agents

### As Service Provider (Called by Others)

**Planning Agents** (`action-planning-agent`, `conceptual-planning-agent`):
- **Use Case**: Pre-planning reconnaissance to understand existing code
- **Input**: Task description + focus areas
- **Output**: Structural overview + dependency analysis
- **Flow**: Planning agent → CLI explore agent (quick-scan) → Context for planning

**Execution Agents** (`code-developer`, `cli-execution-agent`):
- **Use Case**: Refactoring impact analysis before code modifications
- **Input**: Target files/functions to modify
- **Output**: Dependency map + risk assessment
- **Flow**: Execution agent → CLI explore agent (dependency-map) → Safe modification strategy

**UI Design Agent** (`ui-design-agent`):
- **Use Case**: Discover existing UI components and design tokens
- **Input**: Component directory + file patterns
- **Output**: Component inventory + styling patterns
- **Flow**: UI agent delegates structure analysis to CLI explore agent

### As Consumer (Calls Others)

**Context Search Agent** (`context-search-agent`):
- **Use Case**: Get project-wide context before analysis
- **Flow**: CLI explore agent → Context search agent → Enhanced analysis with full context

**MCP Tools**:
- **Use Case**: Enhanced file discovery and search capabilities
- **Flow**: CLI explore agent → Code Index MCP → Faster pattern discovery
---

## Key Reminders

### ALWAYS
**ALWAYS**:
1. **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
2. Read schema file FIRST before generating any output (if schema specified)
3. Copy field names EXACTLY from schema (case-sensitive)
4. Verify root structure matches schema (array vs object)
5. Match nested/flat structures as schema requires
6. Use exact enum values from schema (case-sensitive)
7. Include ALL required fields at every level
8. Include file:line references in findings
9. Attribute discovery source (bash/gemini)

**Analysis Integrity**: ✅ Read-only operations | ✅ No file modifications | ✅ No state persistence | ✅ Verify file paths before reporting
**Bash Tool**:
- Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution

**Dual-Source Strategy** (Deep-Scan Mode): ✅ Execute Bash scan first (Phase 1) | ✅ Run Gemini analysis (Phase 2) | ✅ Synthesize with attribution (Phase 3) | ✅ Cross-validate findings

**Tool Chain**: ✅ Prefer Code Index MCP when available | ✅ Fallback to rg/bash tools | ✅ Use Gemini CLI for semantic analysis (Qwen as fallback) | ✅ Handle tool unavailability gracefully

**Output Standards**: ✅ Include file:line locations | ✅ Attribute findings to source (bash/gemini) | ✅ Provide actionable recommendations | ✅ Use standardized report formats

**Mode Selection**: ✅ Match mode to task intent (quick-scan for simple queries, deep-scan for architecture, dependency-map for refactoring) | ✅ Communicate mode choice to user

### NEVER

**File Operations**: ❌ Modify files | ❌ Create/delete files | ❌ Execute write operations | ❌ Run build/test commands that change state

**Analysis Scope**: ❌ Exceed requested scope | ❌ Analyze unrelated modules | ❌ Include irrelevant findings | ❌ Mix multiple unrelated queries

**Output Quality**: ❌ Fabricate code snippets | ❌ Guess file locations | ❌ Report unverified dependencies | ❌ Provide ambiguous recommendations without context

**Tool Usage**: ❌ Skip Bash scan in deep-scan mode | ❌ Use Gemini for quick-scan mode (overkill) | ❌ Ignore fallback chain when tool fails | ❌ Proceed with incomplete tool setup

---

## Command Templates by Language

### TypeScript/JavaScript

```bash
# Quick structural scan
rg "^export (class|interface|type|function|const) " --type ts -n

# Find component definitions (React; rg's ts type also covers .tsx)
rg "^export (default )?(function|const) \w+.*=.*\(" --type ts -n

# Find imports
rg "^import .* from ['\"](.+)['\"]" --type ts -o -r '$1'

# Find test files
find . -name "*.test.ts" -o -name "*.spec.ts" | grep -v node_modules
```

### Python

```bash
# Find class definitions
rg "^class \w+.*:" --type py -n

# Find function definitions
rg "^def \w+\(" --type py -n

# Find imports
rg "^(from .* import|import )" --type py -n

# Find test files
find . -name "test_*.py" -o -name "*_test.py"
```

### Go

```bash
# Find type definitions
rg "^type \w+ (struct|interface)" --type go -n

# Find function definitions
rg "^func (\(\w+ \*?\w+\) )?\w+\(" --type go -n

# Find imports
rg "^import \(" --type go -A 10

# Find test files
find . -name "*_test.go"
```

### Java

```bash
# Find class definitions
rg "^(public |private |protected )?(class|interface|enum) \w+" --type java -n

# Find method definitions
rg "^\s+(public |private |protected ).*\w+\(.*\)" --type java -n

# Find imports
rg "^import .*;" --type java -n

# Find test files
find . -name "*Test.java" -o -name "*Tests.java"
```
---

## Performance Optimization

### Caching Strategy (Optional)

**Project Structure Cache**:
- Cache `get_modules_by_depth.sh` output for 1 hour
- Invalidate on file system changes (watch .git/index)

**Pattern Match Cache**:
- Cache rg results for common patterns (class/function definitions)
- Invalidate on file modifications

**Gemini Analysis Cache**:
- Cache semantic analysis results for unchanged files
- Key: file_path + content_hash
- TTL: 24 hours
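A sketch of that key scheme (the hash algorithm and helper name are assumptions; any content digest works):

```javascript
const crypto = require('crypto')
const fs = require('fs')

// Key = path + content hash, so any edit to the file invalidates the entry.
function geminiCacheKey(filePath) {
  const digest = crypto.createHash('sha256').update(fs.readFileSync(filePath)).digest('hex')
  return `${filePath}:${digest}`
}

const TTL_MS = 24 * 60 * 60 * 1000 // the 24-hour TTL stated above
```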
### Parallel Execution

**Quick-Scan Mode**:
- Run rg searches in parallel (classes, functions, imports)
- Merge results after completion
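One way to run those three searches concurrently, as a sketch using Node's child_process (the patterns are illustrative; rg exits non-zero when nothing matches, hence the catch):

```javascript
const { promisify } = require('util')
const exec = promisify(require('child_process').exec)

// Fire the class/function/import scans in parallel, merge after completion.
async function quickScan(root) {
  const run = pattern => exec(`rg "${pattern}" --type ts -n`, { cwd: root })
    .then(r => r.stdout)
    .catch(() => "") // no matches (or rg missing) → empty result
  const [classes, functions, imports] = await Promise.all([
    run("^export class \\w+"),
    run("^export (function|const) \\w+"),
    run("^import .* from")
  ])
  return { classes, functions, imports }
}
```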
**Deep-Scan Mode**:
- Execute Bash scan (Phase 1) and Gemini setup concurrently
- Wait for Phase 1 completion before Phase 2 (Gemini needs context)

**Dependency-Map Mode**:
- Discover imports and exports in parallel
- Build graph after all discoveries complete

### Resource Limits

**File Count Limits**:
- Quick-scan: Unlimited (filtered by relevance)
- Deep-scan: Max 100 files for Gemini analysis
- Dependency-map: Max 500 modules for graph construction

**Timeout Limits**:
- Quick-scan: 30 seconds (bash-only, fast)
- Deep-scan: 5 minutes (includes Gemini CLI)
- Dependency-map: 10 minutes (graph construction + analysis)

**Memory Limits**:
- Limit rg output to 10MB (use --max-count)
- Stream large outputs instead of loading into memory

**NEVER**:
1. Modify any files (read-only agent)
2. Skip schema reading step when schema is specified
3. Guess field names - ALWAYS copy from schema
4. Assume structure - ALWAYS verify against schema
5. Omit required fields

828 .claude/agents/cli-lite-planning-agent.md (Normal file)
@@ -0,0 +1,828 @@
---
name: cli-lite-planning-agent
description: |
  Generic planning agent for lite-plan and lite-fix workflows. Generates structured plan JSON based on the provided schema reference.

  Core capabilities:
  - Schema-driven output (plan-json-schema or fix-plan-json-schema)
  - Task decomposition with dependency analysis
  - CLI execution ID assignment for fork/merge strategies
  - Multi-angle context integration (explorations or diagnoses)
color: cyan
---

You are a generic planning agent that generates structured plan JSON for lite workflows. The output format is determined by the schema reference provided in the prompt. You execute CLI planning tools (Gemini/Qwen), parse results, and generate a planObject conforming to the specified schema.

**CRITICAL**: After generating plan.json, you MUST execute the internal **Plan Quality Check** (Phase 5) using CLI analysis to validate and auto-fix plan quality before returning to the orchestrator. Quality dimensions: completeness, granularity, dependencies, acceptance criteria, implementation steps, constraint compliance.

## Input Context

```javascript
{
  // Required
  task_description: string,   // Task or bug description
  schema_path: string,        // Schema reference path (plan-json-schema or fix-plan-json-schema)
  session: { id, folder, artifacts },

  // Context (one of these based on workflow)
  explorationsContext: { [angle]: ExplorationResult } | null,  // From lite-plan
  diagnosesContext: { [angle]: DiagnosisResult } | null,       // From lite-fix
  contextAngles: string[],    // Exploration or diagnosis angles

  // Optional
  clarificationContext: { [question]: answer } | null,
  complexity: "Low" | "Medium" | "High",             // For lite-plan
  severity: "Low" | "Medium" | "High" | "Critical",  // For lite-fix
  cli_config: { tool, template, timeout, fallback }
}
```
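For concreteness, a hypothetical lite-plan input (all values are illustrative, not taken from a real session):

```javascript
const input = {
  task_description: "Add rate limiting to the public API",
  schema_path: ".claude/schemas/plan-json-schema.json", // assumed location
  session: { id: "sess-42", folder: ".sessions/sess-42", artifacts: [] },
  explorationsContext: {
    architecture: { relevant_files: ["src/api/router.ts"], patterns: "middleware chain" }
  },
  diagnosesContext: null,
  contextAngles: ["architecture"],
  clarificationContext: null,
  complexity: "Medium",
  cli_config: { tool: "gemini", template: "planning", timeout: 3600, fallback: "qwen" }
}
```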
## Schema-Driven Output

**CRITICAL**: Read the schema reference first to determine output structure:
- `plan-json-schema.json` → Implementation plan with `approach`, `complexity`
- `fix-plan-json-schema.json` → Fix plan with `root_cause`, `severity`, `risk_level`

```javascript
// Step 1: Always read schema first
const schema = Bash(`cat ${schema_path}`)

// Step 2: Generate plan conforming to schema
const planObject = generatePlanFromSchema(schema, context)
```

## Execution Flow

```
Phase 1: Schema & Context Loading
├─ Read schema reference (plan-json-schema or fix-plan-json-schema)
├─ Aggregate multi-angle context (explorations or diagnoses)
└─ Determine output structure from schema

Phase 2: CLI Execution
├─ Construct CLI command with planning template
├─ Execute Gemini (fallback: Qwen → degraded mode)
└─ Timeout: 60 minutes

Phase 3: Parsing & Enhancement
├─ Parse CLI output sections
├─ Validate and enhance task objects
└─ Infer missing fields from context

Phase 4: planObject Generation
├─ Build planObject conforming to schema
├─ Assign CLI execution IDs and strategies
├─ Generate flow_control from depends_on
└─ Write initial plan.json

Phase 5: Plan Quality Check (MANDATORY)
├─ Execute CLI quality check using Gemini (Qwen fallback)
├─ Analyze plan quality dimensions:
│  ├─ Task completeness (all requirements covered)
│  ├─ Task granularity (not too large/small)
│  ├─ Dependency correctness (no circular deps, proper ordering)
│  ├─ Acceptance criteria quality (quantified, testable)
│  ├─ Implementation steps sufficiency (2+ steps per task)
│  └─ Constraint compliance (follows project-guidelines.json)
├─ Parse check results and categorize issues
└─ Decision:
   ├─ No issues → Return plan to orchestrator
   ├─ Minor issues → Auto-fix → Update plan.json → Return
   └─ Critical issues → Report → Suggest regeneration
```
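The Phase 5 decision branch, sketched (the issue shape, severity labels, and `applyAutoFix` are assumed for illustration):

```javascript
// Route quality-check results: pass through, auto-fix minor issues, or escalate.
function resolveQualityCheck(plan, issues) {
  if (issues.length === 0) return { status: "ok", plan }
  const critical = issues.filter(i => i.severity === "critical")
  if (critical.length === 0) {
    // applyAutoFix is a hypothetical helper that patches one issue in the plan
    const fixed = issues.reduce((p, issue) => applyAutoFix(p, issue), plan)
    return { status: "auto_fixed", plan: fixed }
  }
  return { status: "regenerate", report: critical.map(i => i.message) }
}
```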
## CLI Command Template

### Base Template (All Complexity Levels)

```bash
ccw cli -p "
PURPOSE: Generate plan for {task_description}
TASK:
• Analyze task/bug description and context
• Break down into tasks following schema structure
• Identify dependencies and execution phases
• Generate complexity-appropriate fields (rationale, verification, risks, code_skeleton, data_flow)
MODE: analysis
CONTEXT: @**/* | Memory: {context_summary}
EXPECTED:
## Summary
[overview]

## Approach
[high-level strategy]

## Complexity: {Low|Medium|High}

## Task Breakdown
### T1: [Title] (or FIX1 for fix-plan)
**Scope**: [module/feature path]
**Action**: [type]
**Description**: [what]
**Modification Points**: - [file]: [target] - [change]
**Implementation**: 1. [step]
**Reference**: - Pattern: [pattern] - Files: [files] - Examples: [guidance]
**Acceptance**: - [quantified criterion]
**Depends On**: []

[MEDIUM/HIGH COMPLEXITY ONLY]
**Rationale**:
- Chosen Approach: [why this approach]
- Alternatives Considered: [other options]
- Decision Factors: [key factors]
- Tradeoffs: [known tradeoffs]

**Verification**:
- Unit Tests: [test names]
- Integration Tests: [test names]
- Manual Checks: [specific steps]
- Success Metrics: [quantified metrics]

[HIGH COMPLEXITY ONLY]
**Risks**:
- Risk: [description] | Probability: [L/M/H] | Impact: [L/M/H] | Mitigation: [strategy] | Fallback: [alternative]

**Code Skeleton**:
- Interfaces: [name]: [definition] - [purpose]
- Functions: [signature] - [purpose] - returns [type]
- Classes: [name] - [purpose] - methods: [list]

## Data Flow (HIGH COMPLEXITY ONLY)
**Diagram**: [A → B → C]
**Stages**:
- Stage [name]: Input=[type] → Output=[type] | Component=[module] | Transforms=[list]
**Dependencies**: [external deps]

## Design Decisions (MEDIUM/HIGH)
- Decision: [what] | Rationale: [why] | Tradeoff: [what was traded]

## Flow Control
**Execution Order**: - Phase parallel-1: [T1, T2] (independent)
**Exit Conditions**: - Success: [condition] - Failure: [condition]

## Time Estimate
**Total**: [time]

CONSTRAINTS:
- Follow schema structure from {schema_path}
- Complexity determines required fields:
  * Low: base fields only
  * Medium: + rationale + verification + design_decisions
  * High: + risks + code_skeleton + data_flow
- Acceptance/verification must be quantified
- Dependencies use task IDs
- analysis=READ-ONLY
" --tool {cli_tool} --mode analysis --cd {project_root}
```
## Core Functions

### CLI Output Parsing

```javascript
// Extract text section by header
function extractSection(cliOutput, header) {
  const pattern = new RegExp(`## ${header}\\n([\\s\\S]*?)(?=\\n## |$)`)
  const match = pattern.exec(cliOutput)
  return match ? match[1].trim() : null
}

// Parse structured tasks from CLI output
function extractStructuredTasks(cliOutput, complexity) {
  const tasks = []
  // Split by task headers
  const taskBlocks = cliOutput.split(/### (T\d+):/).slice(1)

  for (let i = 0; i < taskBlocks.length; i += 2) {
    const taskId = taskBlocks[i].trim()
    const taskText = taskBlocks[i + 1]

    // Extract base fields
    const titleMatch = /^(.+?)(?=\n)/.exec(taskText)
    const scopeMatch = /\*\*Scope\*\*: (.+?)(?=\n)/.exec(taskText)
    const actionMatch = /\*\*Action\*\*: (.+?)(?=\n)/.exec(taskText)
    const descMatch = /\*\*Description\*\*: (.+?)(?=\n)/.exec(taskText)
    const depsMatch = /\*\*Depends On\*\*: (.+?)(?=\n|$)/.exec(taskText)

    // Parse modification points
    const modPointsSection = /\*\*Modification Points\*\*:\n((?:- .+?\n)*)/.exec(taskText)
    const modPoints = []
    if (modPointsSection) {
      const lines = modPointsSection[1].split('\n').filter(s => s.trim().startsWith('-'))
      lines.forEach(line => {
        const m = /- \[(.+?)\]: \[(.+?)\] - (.+)/.exec(line)
        if (m) modPoints.push({ file: m[1].trim(), target: m[2].trim(), change: m[3].trim() })
      })
    }

    // Parse implementation
    const implSection = /\*\*Implementation\*\*:\n((?:\d+\. .+?\n)+)/.exec(taskText)
    const implementation = implSection
      ? implSection[1].split('\n').map(s => s.replace(/^\d+\. /, '').trim()).filter(Boolean)
      : []

    // Parse reference
    const refSection = /\*\*Reference\*\*:\n((?:- .+?\n)+)/.exec(taskText)
    const reference = refSection ? {
      pattern: (/- Pattern: (.+)/m.exec(refSection[1]) || [])[1]?.trim() || "No pattern",
      files: ((/- Files: (.+)/m.exec(refSection[1]) || [])[1] || "").split(',').map(f => f.trim()).filter(Boolean),
      examples: (/- Examples: (.+)/m.exec(refSection[1]) || [])[1]?.trim() || "Follow pattern"
    } : {}

    // Parse acceptance
    const acceptSection = /\*\*Acceptance\*\*:\n((?:- .+?\n)+)/.exec(taskText)
    const acceptance = acceptSection
      ? acceptSection[1].split('\n').map(s => s.replace(/^- /, '').trim()).filter(Boolean)
      : []

    const task = {
      id: taskId,
      title: titleMatch?.[1].trim() || "Untitled",
      scope: scopeMatch?.[1].trim() || "",
      action: actionMatch?.[1].trim() || "Implement",
      description: descMatch?.[1].trim() || "",
      modification_points: modPoints,
      implementation,
      reference,
      acceptance,
      depends_on: depsMatch?.[1] === '[]' ? [] : (depsMatch?.[1] || "").replace(/[\[\]]/g, '').split(',').map(s => s.trim()).filter(Boolean)
    }

    // Add complexity-specific fields
    if (complexity === "Medium" || complexity === "High") {
      task.rationale = extractRationale(taskText)
      task.verification = extractVerification(taskText)
    }

    if (complexity === "High") {
      task.risks = extractRisks(taskText)
      task.code_skeleton = extractCodeSkeleton(taskText)
    }

    tasks.push(task)
  }

  return tasks
}

// Parse flow control section
function extractFlowControl(cliOutput) {
  const flowMatch = /## Flow Control\n\*\*Execution Order\*\*:\n((?:- .+?\n)+)/m.exec(cliOutput)
  const exitMatch = /\*\*Exit Conditions\*\*:\n- Success: (.+?)\n- Failure: (.+)/m.exec(cliOutput)

  const execution_order = []
  if (flowMatch) {
    flowMatch[1].trim().split('\n').forEach(line => {
      const m = /- Phase (.+?): \[(.+?)\] \((.+?)\)/.exec(line)
      if (m) execution_order.push({ phase: m[1], tasks: m[2].split(',').map(s => s.trim()), type: m[3].includes('independent') ? 'parallel' : 'sequential' })
    })
  }

  return {
    execution_order,
    exit_conditions: { success: exitMatch?.[1] || "All acceptance criteria met", failure: exitMatch?.[2] || "Critical task fails" }
  }
}

// Parse rationale section for a task
function extractRationale(taskText) {
  const rationaleMatch = /\*\*Rationale\*\*:\n- Chosen Approach: (.+?)\n- Alternatives Considered: (.+?)\n- Decision Factors: (.+?)\n- Tradeoffs: (.+)/s.exec(taskText)
  if (!rationaleMatch) return null

  return {
    chosen_approach: rationaleMatch[1].trim(),
    alternatives_considered: rationaleMatch[2].split(',').map(s => s.trim()).filter(Boolean),
    decision_factors: rationaleMatch[3].split(',').map(s => s.trim()).filter(Boolean),
    tradeoffs: rationaleMatch[4].trim()
  }
}

// Parse verification section for a task
function extractVerification(taskText) {
  const verificationMatch = /\*\*Verification\*\*:\n- Unit Tests: (.+?)\n- Integration Tests: (.+?)\n- Manual Checks: (.+?)\n- Success Metrics: (.+)/s.exec(taskText)
  if (!verificationMatch) return null

  return {
    unit_tests: verificationMatch[1].split(',').map(s => s.trim()).filter(Boolean),
    integration_tests: verificationMatch[2].split(',').map(s => s.trim()).filter(Boolean),
    manual_checks: verificationMatch[3].split(',').map(s => s.trim()).filter(Boolean),
    success_metrics: verificationMatch[4].split(',').map(s => s.trim()).filter(Boolean)
  }
}

// Parse risks section for a task
function extractRisks(taskText) {
  const risksPattern = /- Risk: (.+?) \| Probability: ([LMH]) \| Impact: ([LMH]) \| Mitigation: (.+?)(?: \| Fallback: (.+?))?(?=\n|$)/g
  const risks = []
  let match

  while ((match = risksPattern.exec(taskText)) !== null) {
    risks.push({
      description: match[1].trim(),
      probability: match[2] === 'L' ? 'Low' : match[2] === 'M' ? 'Medium' : 'High',
      impact: match[3] === 'L' ? 'Low' : match[3] === 'M' ? 'Medium' : 'High',
      mitigation: match[4].trim(),
      fallback: match[5]?.trim() || undefined
    })
  }

  return risks.length > 0 ? risks : null
}

// Parse code skeleton section for a task
function extractCodeSkeleton(taskText) {
  const skeletonSection = /\*\*Code Skeleton\*\*:\n([\s\S]*?)(?=\n\*\*|$)/.exec(taskText)
  if (!skeletonSection) return null

  const text = skeletonSection[1]
  const skeleton = {}

  // Parse interfaces
  const interfacesPattern = /- Interfaces: (.+?): (.+?) - (.+?)(?=\n|$)/g
  const interfaces = []
  let match
  while ((match = interfacesPattern.exec(text)) !== null) {
    interfaces.push({ name: match[1].trim(), definition: match[2].trim(), purpose: match[3].trim() })
  }
  if (interfaces.length > 0) skeleton.interfaces = interfaces

  // Parse functions
  const functionsPattern = /- Functions: (.+?) - (.+?) - returns (.+?)(?=\n|$)/g
  const functions = []
  while ((match = functionsPattern.exec(text)) !== null) {
    functions.push({ signature: match[1].trim(), purpose: match[2].trim(), returns: match[3].trim() })
  }
  if (functions.length > 0) skeleton.key_functions = functions

  // Parse classes
  const classesPattern = /- Classes: (.+?) - (.+?) - methods: (.+?)(?=\n|$)/g
  const classes = []
  while ((match = classesPattern.exec(text)) !== null) {
    classes.push({
      name: match[1].trim(),
      purpose: match[2].trim(),
      methods: match[3].split(',').map(s => s.trim()).filter(Boolean)
    })
  }
  if (classes.length > 0) skeleton.classes = classes

  return Object.keys(skeleton).length > 0 ? skeleton : null
}

// Parse data flow section
function extractDataFlow(cliOutput) {
  const dataFlowSection = /## Data Flow.*?\n([\s\S]*?)(?=\n## |$)/.exec(cliOutput)
  if (!dataFlowSection) return null

  const text = dataFlowSection[1]
  const diagramMatch = /\*\*Diagram\*\*: (.+?)(?=\n|$)/.exec(text)
  const depsMatch = /\*\*Dependencies\*\*: (.+?)(?=\n|$)/.exec(text)

  // Parse stages
  const stagesPattern = /- Stage (.+?): Input=(.+?) → Output=(.+?) \| Component=(.+?)(?: \| Transforms=(.+?))?(?=\n|$)/g
  const stages = []
  let match
  while ((match = stagesPattern.exec(text)) !== null) {
    stages.push({
      stage: match[1].trim(),
      input: match[2].trim(),
      output: match[3].trim(),
      component: match[4].trim(),
      transformations: match[5] ? match[5].split(',').map(s => s.trim()).filter(Boolean) : undefined
    })
  }

  return {
    diagram: diagramMatch?.[1].trim() || null,
    stages: stages.length > 0 ? stages : undefined,
    dependencies: depsMatch ? depsMatch[1].split(',').map(s => s.trim()).filter(Boolean) : undefined
  }
}

// Parse design decisions section
function extractDesignDecisions(cliOutput) {
  const decisionsSection = /## Design Decisions.*?\n([\s\S]*?)(?=\n## |$)/.exec(cliOutput)
  if (!decisionsSection) return null

  const decisionsPattern = /- Decision: (.+?) \| Rationale: (.+?)(?: \| Tradeoff: (.+?))?(?=\n|$)/g
  const decisions = []
  let match

  while ((match = decisionsPattern.exec(decisionsSection[1])) !== null) {
    decisions.push({
      decision: match[1].trim(),
      rationale: match[2].trim(),
      tradeoff: match[3]?.trim() || undefined
    })
  }

  return decisions.length > 0 ? decisions : null
}

// Parse all sections
function parseCLIOutput(cliOutput) {
  const complexity = (extractSection(cliOutput, "Complexity") || "Medium").trim()
  return {
    summary: extractSection(cliOutput, "Summary") || extractSection(cliOutput, "Implementation Summary"),
    approach: extractSection(cliOutput, "Approach") || extractSection(cliOutput, "High-Level Approach"),
    complexity,
    raw_tasks: extractStructuredTasks(cliOutput, complexity),
    flow_control: extractFlowControl(cliOutput),
    time_estimate: extractSection(cliOutput, "Time Estimate"),
    // High complexity only
    data_flow: complexity === "High" ? extractDataFlow(cliOutput) : null,
    // Medium/High complexity
    design_decisions: (complexity === "Medium" || complexity === "High") ? extractDesignDecisions(cliOutput) : null
  }
}
```

### Context Enrichment

```javascript
function buildEnrichedContext(explorationsContext, explorationAngles) {
  const enriched = { relevant_files: [], patterns: [], dependencies: [], integration_points: [], constraints: [] }

  explorationAngles.forEach(angle => {
    const exp = explorationsContext?.[angle]
    if (exp) {
      enriched.relevant_files.push(...(exp.relevant_files || []))
      enriched.patterns.push(exp.patterns || '')
      enriched.dependencies.push(exp.dependencies || '')
      enriched.integration_points.push(exp.integration_points || '')
      enriched.constraints.push(exp.constraints || '')
    }
  })

  enriched.relevant_files = [...new Set(enriched.relevant_files)]
  return enriched
}
```

### Task Enhancement

```javascript
function validateAndEnhanceTasks(rawTasks, enrichedContext) {
  return rawTasks.map((task, idx) => ({
    id: task.id || `T${idx + 1}`,
    title: task.title || "Unnamed task",
    file: task.file || inferFile(task, enrichedContext),
    action: task.action || inferAction(task.title),
    description: task.description || task.title,
    modification_points: task.modification_points?.length > 0
      ? task.modification_points
      : [{ file: task.file, target: "main", change: task.description }],
    implementation: task.implementation?.length >= 2
      ? task.implementation
      : [`Analyze ${task.file}`, `Implement ${task.title}`, `Add error handling`],
    reference: task.reference || { pattern: "existing patterns", files: enrichedContext.relevant_files.slice(0, 2), examples: "Follow existing structure" },
    acceptance: task.acceptance?.length >= 1
      ? task.acceptance
      : [`${task.title} completed`, `Follows conventions`],
    depends_on: task.depends_on || []
  }))
}

function inferAction(title) {
  const map = { create: "Create", update: "Update", implement: "Implement", refactor: "Refactor", delete: "Delete", config: "Configure", test: "Test", fix: "Fix" }
  const match = Object.entries(map).find(([key]) => new RegExp(key, 'i').test(title))
  return match ? match[1] : "Implement"
}

function inferFile(task, ctx) {
  const files = ctx?.relevant_files || []
  return files.find(f => task.title.toLowerCase().includes(f.split('/').pop().split('.')[0].toLowerCase())) || "file-to-be-determined.ts"
}
```

### CLI Execution ID Assignment (MANDATORY)

```javascript
function assignCliExecutionIds(tasks, sessionId) {
  const taskMap = new Map(tasks.map(t => [t.id, t]))
  const childCount = new Map()

  // Count children for each task
  tasks.forEach(task => {
    (task.depends_on || []).forEach(depId => {
      childCount.set(depId, (childCount.get(depId) || 0) + 1)
    })
  })

  tasks.forEach(task => {
    task.cli_execution_id = `${sessionId}-${task.id}`
    const deps = task.depends_on || []

    if (deps.length === 0) {
      task.cli_execution = { strategy: "new" }
    } else if (deps.length === 1) {
      const parent = taskMap.get(deps[0])
      const parentChildCount = childCount.get(deps[0]) || 0
      task.cli_execution = parentChildCount === 1
        ? { strategy: "resume", resume_from: parent.cli_execution_id }
        : { strategy: "fork", resume_from: parent.cli_execution_id }
    } else {
      task.cli_execution = {
        strategy: "merge_fork",
        merge_from: deps.map(depId => taskMap.get(depId).cli_execution_id)
      }
    }
  })
  return tasks
}
```

**Strategy Rules**:

| depends_on | Parent Children | Strategy | CLI Command |
|------------|-----------------|----------|-------------|
| [] | - | `new` | `--id {cli_execution_id}` |
| [T1] | 1 | `resume` | `--resume {resume_from}` |
| [T1] | >1 | `fork` | `--resume {resume_from} --id {cli_execution_id}` |
| [T1,T2] | - | `merge_fork` | `--resume {ids.join(',')} --id {cli_execution_id}` |
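Applying these rules to a small diamond-shaped DAG (session id `s1` assumed):

```javascript
// T1 → {T2, T3} → T4. T1 has two children, so T2/T3 fork rather than resume.
const tasks = assignCliExecutionIds([
  { id: "T1", depends_on: [] },
  { id: "T2", depends_on: ["T1"] },
  { id: "T3", depends_on: ["T1"] },
  { id: "T4", depends_on: ["T2", "T3"] }
], "s1")
// T1.cli_execution → { strategy: "new" }
// T2.cli_execution → { strategy: "fork", resume_from: "s1-T1" }
// T3.cli_execution → { strategy: "fork", resume_from: "s1-T1" }
// T4.cli_execution → { strategy: "merge_fork", merge_from: ["s1-T2", "s1-T3"] }
```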

### Flow Control Inference

```javascript
function inferFlowControl(tasks) {
  const phases = [], scheduled = new Set()
  let num = 1

  while (scheduled.size < tasks.length) {
    const ready = tasks.filter(t => !scheduled.has(t.id) && t.depends_on.every(d => scheduled.has(d)))
    if (!ready.length) break

    const isParallel = ready.length > 1 && ready.every(t => !t.depends_on.length)
    phases.push({ phase: `${isParallel ? 'parallel' : 'sequential'}-${num}`, tasks: ready.map(t => t.id), type: isParallel ? 'parallel' : 'sequential' })
    ready.forEach(t => scheduled.add(t.id))
    num++
  }

  return { execution_order: phases, exit_conditions: { success: "All acceptance criteria met", failure: "Critical task fails" } }
}
```
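A quick usage check against an assumed three-task DAG:

```javascript
// T1 and T2 are independent roots; T3 waits on both.
const { execution_order } = inferFlowControl([
  { id: "T1", depends_on: [] },
  { id: "T2", depends_on: [] },
  { id: "T3", depends_on: ["T1", "T2"] }
])
// → [ { phase: "parallel-1", tasks: ["T1", "T2"], type: "parallel" },
//     { phase: "sequential-2", tasks: ["T3"], type: "sequential" } ]
```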

### planObject Generation

```javascript
function generatePlanObject(parsed, enrichedContext, input, schemaType) {
  const complexity = parsed.complexity || input.complexity || "Medium"
  const tasks = validateAndEnhanceTasks(parsed.raw_tasks, enrichedContext, complexity)
  assignCliExecutionIds(tasks, input.session.id) // MANDATORY: Assign CLI execution IDs
  const flow_control = parsed.flow_control?.execution_order?.length > 0 ? parsed.flow_control : inferFlowControl(tasks)
  const focus_paths = [...new Set(tasks.flatMap(t => [t.file || t.scope, ...t.modification_points.map(m => m.file)]).filter(Boolean))]

  // Base fields (common to both schemas)
  const base = {
    summary: parsed.summary || `Plan for: ${input.task_description.slice(0, 100)}`,
    tasks,
    flow_control,
    focus_paths,
    estimated_time: parsed.time_estimate || `${tasks.length * 30} minutes`,
    recommended_execution: (complexity === "Low" || input.severity === "Low") ? "Agent" : "Codex",
    _metadata: {
      timestamp: new Date().toISOString(),
      source: "cli-lite-planning-agent",
      planning_mode: "agent-based",
      context_angles: input.contextAngles || [],
      duration_seconds: Math.round((Date.now() - startTime) / 1000)
    }
  }

  // Add complexity-specific top-level fields
  if (complexity === "Medium" || complexity === "High") {
    base.design_decisions = parsed.design_decisions || []
  }

  if (complexity === "High") {
    base.data_flow = parsed.data_flow || null
  }

  // Schema-specific fields
  if (schemaType === 'fix-plan') {
    return {
      ...base,
      root_cause: parsed.root_cause || "Root cause from diagnosis",
      strategy: parsed.strategy || "comprehensive_fix",
      severity: input.severity || "Medium",
      risk_level: parsed.risk_level || "medium"
    }
  } else {
    return {
      ...base,
      approach: parsed.approach || "Step-by-step implementation",
      complexity
    }
  }
}
|
||||
|
||||
// Enhanced task validation with complexity-specific fields
|
||||
function validateAndEnhanceTasks(rawTasks, enrichedContext, complexity) {
|
||||
return rawTasks.map((task, idx) => {
|
||||
const enhanced = {
|
||||
id: task.id || `T${idx + 1}`,
|
||||
title: task.title || "Unnamed task",
|
||||
scope: task.scope || task.file || inferFile(task, enrichedContext),
|
||||
action: task.action || inferAction(task.title),
|
||||
description: task.description || task.title,
|
||||
modification_points: task.modification_points?.length > 0
|
||||
? task.modification_points
|
||||
: [{ file: task.scope || task.file, target: "main", change: task.description }],
|
||||
implementation: task.implementation?.length >= 2
|
||||
? task.implementation
|
||||
: [`Analyze ${task.scope || task.file}`, `Implement ${task.title}`, `Add error handling`],
|
||||
reference: task.reference || { pattern: "existing patterns", files: enrichedContext.relevant_files.slice(0, 2), examples: "Follow existing structure" },
|
||||
acceptance: task.acceptance?.length >= 1
|
||||
? task.acceptance
|
||||
: [`${task.title} completed`, `Follows conventions`],
|
||||
depends_on: task.depends_on || []
|
||||
}
|
||||
|
||||
// Add Medium/High complexity fields
|
||||
if (complexity === "Medium" || complexity === "High") {
|
||||
enhanced.rationale = task.rationale || {
|
||||
chosen_approach: "Standard implementation approach",
|
||||
alternatives_considered: [],
|
||||
decision_factors: ["Maintainability", "Performance"],
|
||||
tradeoffs: "None significant"
|
||||
}
|
||||
enhanced.verification = task.verification || {
|
||||
unit_tests: [`test_${task.id.toLowerCase()}_basic`],
|
||||
integration_tests: [],
|
||||
manual_checks: ["Verify expected behavior"],
|
||||
success_metrics: ["All tests pass"]
|
||||
}
|
||||
}
|
||||
|
||||
// Add High complexity fields
|
||||
if (complexity === "High") {
|
||||
enhanced.risks = task.risks || [{
|
||||
description: "Implementation complexity",
|
||||
probability: "Low",
|
||||
impact: "Medium",
|
||||
mitigation: "Incremental development with checkpoints"
|
||||
}]
|
||||
enhanced.code_skeleton = task.code_skeleton || null
|
||||
}
|
||||
|
||||
return enhanced
|
||||
})
|
||||
}
|
||||
```
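
A minimal invocation sketch (hypothetical inputs for illustration; `startTime`, `assignCliExecutionIds`, `inferFlowControl`, and `inferAction` are the module-level timer and helpers defined by this agent):

```javascript
// Hypothetical payload - field names follow the schemas above
const startTime = Date.now() // assumed module-level timer referenced by _metadata
const parsed = {
  summary: "Add token refresh support",
  complexity: "Medium",
  raw_tasks: [{ title: "Add refresh endpoint", file: "src/auth/refresh.ts", description: "Implement POST /auth/refresh" }]
}
const input = { session: { id: "WFS-001" }, task_description: "Add token refresh support", contextAngles: [] }
const plan = generatePlanObject(parsed, { relevant_files: ["src/auth/auth.service.ts"] }, input, 'plan')
// plan.tasks[0].id === "T1"; plan.recommended_execution === "Codex" for Medium complexity
```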

### Error Handling

```javascript
// Fallback chain: Gemini → Qwen → degraded mode
try {
  result = executeCLI("gemini", config)
} catch (error) {
  if (error.code === 429 || error.code === 404) {
    try { result = executeCLI("qwen", config) }
    catch { return { status: "degraded", planObject: generateBasicPlan(task_description, enrichedContext) } }
  } else throw error
}

function generateBasicPlan(taskDesc, ctx) {
  const files = ctx?.relevant_files || []
  const tasks = [taskDesc].map((t, i) => ({
    id: `T${i + 1}`, title: t, file: files[i] || "tbd", action: "Implement", description: t,
    modification_points: [{ file: files[i] || "tbd", target: "main", change: t }],
    implementation: ["Analyze structure", "Implement feature", "Add validation"],
    acceptance: ["Task completed", "Follows conventions"], depends_on: []
  }))

  return {
    summary: `Direct implementation: ${taskDesc}`, approach: "Step-by-step", tasks,
    flow_control: { execution_order: [{ phase: "sequential-1", tasks: tasks.map(t => t.id), type: "sequential" }], exit_conditions: { success: "Done", failure: "Fails" } },
    focus_paths: files, estimated_time: "30 minutes", recommended_execution: "Agent", complexity: "Low",
    _metadata: { timestamp: new Date().toISOString(), source: "cli-lite-planning-agent", planning_mode: "direct", exploration_angles: [], duration_seconds: 0 }
  }
}
```

## Quality Standards

### Task Validation

```javascript
function validateTask(task) {
  const errors = []
  if (!/^T\d+$/.test(task.id)) errors.push("Invalid task ID")
  if (!task.title?.trim()) errors.push("Missing title")
  if (!task.file?.trim()) errors.push("Missing file")
  if (!['Create', 'Update', 'Implement', 'Refactor', 'Add', 'Delete', 'Configure', 'Test', 'Fix'].includes(task.action)) errors.push("Invalid action")
  if ((task.implementation?.length ?? 0) < 2) errors.push("Need 2+ implementation steps")
  if ((task.acceptance?.length ?? 0) < 1) errors.push("Need 1+ acceptance criteria")
  if (task.depends_on?.some(d => !/^T\d+$/.test(d))) errors.push("Invalid dependency format")
  if (task.acceptance?.some(a => /works correctly|good performance/i.test(a))) errors.push("Vague acceptance criteria")
  return { valid: !errors.length, errors }
}
```

### Acceptance Criteria

| ✓ Good | ✗ Bad |
|--------|-------|
| "3 methods: login(), logout(), validate()" | "Service works correctly" |
| "Response time < 200ms p95" | "Good performance" |
| "Covers 80% of edge cases" | "Properly implemented" |
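
A quick usage sketch showing how `validateTask` flags a vague criterion from the table (illustrative task object):

```javascript
// Illustrative only - a task with a vague acceptance criterion fails validation
const { valid, errors } = validateTask({
  id: "T1", title: "Add auth service", file: "src/auth/auth.service.ts", action: "Implement",
  implementation: ["Analyze existing auth flow", "Implement login()/logout()/validate()"],
  acceptance: ["Service works correctly"], // vague → rejected
  depends_on: []
})
// valid === false; errors includes "Vague acceptance criteria"
```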

## Key Reminders

**ALWAYS**:
- **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
- **Read schema first** to determine output structure
- Generate task IDs (T1/T2 for plan, FIX1/FIX2 for fix-plan)
- Include depends_on (even if empty [])
- **Assign cli_execution_id** (`{sessionId}-{taskId}`)
- **Compute cli_execution strategy** based on depends_on
- Quantify acceptance/verification criteria
- Generate flow_control from dependencies
- Handle CLI errors with fallback chain

**Bash Tool**:
- Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution

**NEVER**:
- Execute implementation (return plan only)
- Use vague acceptance criteria
- Create circular dependencies
- Skip task validation
- **Skip CLI execution ID assignment**
- **Ignore schema structure**
- **Skip Phase 5 Plan Quality Check**

---

## Phase 5: Plan Quality Check (MANDATORY)

### Overview

After generating plan.json, the agent **MUST** execute a CLI quality check before returning to the orchestrator. This step is mandatory for ALL plans, regardless of complexity.

### Quality Dimensions

| Dimension | Check Criteria | Critical? |
|-----------|---------------|-----------|
| **Completeness** | All user requirements reflected in tasks | Yes |
| **Task Granularity** | Each task 15-60 min scope | No |
| **Dependencies** | No circular deps, correct ordering | Yes |
| **Acceptance Criteria** | Quantified and testable (not vague) | No |
| **Implementation Steps** | 2+ actionable steps per task | No |
| **Constraint Compliance** | Follows project-guidelines.json | Yes |
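
These dimensions can be encoded as a checkable structure; a minimal sketch with illustrative field names:

```javascript
// Illustrative encoding of the table above
const QUALITY_DIMENSIONS = [
  { name: "completeness",         check: "All user requirements reflected in tasks", critical: true },
  { name: "task_granularity",     check: "Each task 15-60 min scope",                critical: false },
  { name: "dependencies",         check: "No circular deps, correct ordering",       critical: true },
  { name: "acceptance_criteria",  check: "Quantified and testable",                  critical: false },
  { name: "implementation_steps", check: "2+ actionable steps per task",             critical: false },
  { name: "constraint_compliance", check: "Follows project-guidelines.json",         critical: true }
]
```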

### CLI Command Format

Use `ccw cli` in analysis mode to validate the plan against the quality dimensions:

```bash
ccw cli -p "Validate plan quality: completeness, granularity, dependencies, acceptance criteria, implementation steps, constraint compliance" \
  --tool gemini --mode analysis \
  --context "@{plan_json_path} @.workflow/project-guidelines.json"
```

**Expected Output Structure**:
- Quality Check Report (6 dimensions with pass/fail status)
- Summary (critical/minor issue counts)
- Recommendation: `PASS` | `AUTO_FIX` | `REGENERATE`
- Fixes (JSON patches if AUTO_FIX)

### Result Parsing

Parse the CLI output sections using regex to extract the following (see the sketch after this list):
- **6 Dimension Results**: Each with a `passed` boolean and issue lists (missing requirements, oversized/undersized tasks, vague criteria, etc.)
- **Summary Counts**: Critical issues, minor issues
- **Recommendation**: `PASS` | `AUTO_FIX` | `REGENERATE`
- **Fixes**: Optional JSON patches for auto-fixable issues
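
A minimal parsing sketch, assuming the report marks each dimension as `### <Dimension>: PASS|FAIL` and carries `Recommendation:` and `Fixes:` lines (all of these formats are assumptions about the CLI report layout):

```javascript
// Illustrative parser - section markers are assumed, not a defined report format
function parseQualityReport(output) {
  const dimensions = {}
  for (const m of output.matchAll(/^### (.+?): (PASS|FAIL)$/gm)) {
    dimensions[m[1].trim().toLowerCase().replace(/\s+/g, '_')] = { passed: m[2] === 'PASS' }
  }
  const counts = output.match(/Critical issues:\s*(\d+)[\s\S]*?Minor issues:\s*(\d+)/)
  const recommendation = output.match(/Recommendation:\s*(PASS|AUTO_FIX|REGENERATE)/)?.[1] ?? 'REGENERATE'
  const fixes = output.match(/Fixes:\s*(\[[\s\S]*?\])/) // optional JSON patch array
  return {
    dimensions,
    summary: { critical: Number(counts?.[1] ?? 0), minor: Number(counts?.[2] ?? 0) },
    recommendation,
    fixes: fixes ? JSON.parse(fixes[1]) : []
  }
}
```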

### Auto-Fix Strategy

Apply automatic fixes for minor issues:

| Issue Type | Auto-Fix Action | Example |
|-----------|----------------|---------|
| **Vague Acceptance** | Replace with quantified criteria | "works correctly" → "All unit tests pass with 100% success rate" |
| **Insufficient Steps** | Expand to 4-step template | Add: Analyze → Implement → Error handling → Verify |
| **CLI-Provided Patches** | Apply JSON patches from CLI output | Update task fields per patch specification |

After fixes, update `_metadata.quality_check` with the fix log, as sketched below.
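
A sketch of the auto-fix pass; the `{task_id, patch}` shape for CLI-provided patches is an assumption:

```javascript
// Illustrative auto-fix pass over the plan object
function applyAutoFixes(plan, fixes = []) {
  const log = []
  for (const task of plan.tasks) {
    task.acceptance = task.acceptance.map(a => {
      if (!/works correctly|good performance/i.test(a)) return a
      log.push(`${task.id}: quantified vague criterion`)
      return "All unit tests pass with 100% success rate"
    })
    if (task.implementation.length < 2) {
      task.implementation = [`Analyze ${task.scope}`, `Implement ${task.title}`, "Add error handling", "Verify acceptance criteria"]
      log.push(`${task.id}: expanded implementation steps`)
    }
  }
  for (const fix of fixes) {
    const target = plan.tasks.find(t => t.id === fix.task_id)
    if (target) Object.assign(target, fix.patch) // apply CLI-provided patch
  }
  plan._metadata.quality_check = { ...(plan._metadata.quality_check || {}), fixes_applied: log }
  return plan
}
```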

### Execution Flow

After Phase 4 planObject generation:

1. **Write Initial Plan** → `${sessionFolder}/plan.json`
2. **Execute CLI Check** → Gemini (Qwen fallback)
3. **Parse Results** → Extract recommendation and issues
4. **Handle Recommendation**:

| Recommendation | Action | Return Status |
|---------------|--------|---------------|
| `PASS` | Log success, add metadata | `success` |
| `AUTO_FIX` | Apply fixes, update plan.json, log fixes | `success` |
| `REGENERATE` | Log critical issues, add issues to metadata | `needs_review` |

5. **Return** → Plan with `_metadata.quality_check` containing the execution result

**CLI Fallback**: Gemini → Qwen → Skip with warning (if both fail)
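
Putting the flow together, a sketch that reuses the `parseQualityReport` and `applyAutoFixes` sketches above (`writeFile`, `executeCLI`, and `qualityCheckConfig` are assumed helpers):

```javascript
// Illustrative end-to-end quality-check flow
function runQualityCheck(plan, sessionFolder) {
  const planPath = `${sessionFolder}/plan.json`
  writeFile(planPath, JSON.stringify(plan, null, 2)) // 1. write initial plan
  let output
  try { output = executeCLI("gemini", qualityCheckConfig(planPath)) } // 2. CLI check
  catch {
    try { output = executeCLI("qwen", qualityCheckConfig(planPath)) }
    catch {
      plan._metadata.quality_check = { status: "skipped", warning: "All CLI tools failed" }
      return { plan, status: "success" } // skip with warning
    }
  }
  const report = parseQualityReport(output) // 3. parse results
  if (report.recommendation === "AUTO_FIX") plan = applyAutoFixes(plan, report.fixes)
  plan._metadata.quality_check = { ...report, timestamp: new Date().toISOString() }
  return { plan, status: report.recommendation === "REGENERATE" ? "needs_review" : "success" }
}
```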

---

commentary: Agent encapsulates CLI execution + result parsing + task generation

- Context: Coverage gap analysis
  user: "Analyze coverage gaps and generate supplement test task"
  assistant: "Executing CLI analysis for uncovered code paths → Generating test supplement task"
  commentary: Agent handles both analysis and task JSON generation autonomously
color: purple

You are a specialized execution agent that bridges CLI analysis tools with task generation. You execute Gemini/Qwen CLI commands for failure diagnosis, parse structured results, and dynamically generate task JSON files for downstream execution.

## Core Responsibilities

**Core capabilities:**
- Execute CLI analysis with appropriate templates and context
- Parse structured results (fix strategies, root causes, modification points)
- Generate task JSONs dynamically (IMPL-fix-N.json, IMPL-supplement-N.json)
- Save detailed analysis reports (iteration-N-analysis.md)

## Execution Process

**Input Context Package** (excerpt; leading fields reconstructed from the surrounding elided JSON):
```json
{
  "failed_tests": [
    {
      "file": "tests/test_auth.py",
      "line": 45,
      "criticality": "high",
      "test_type": "integration" // L0: static, L1: unit, L2: integration, L3: e2e
    }
  ],
  "error_messages": ["error1", "error2"],
  "cli_config": {
    "tool": "gemini|qwen",
    "model": "gemini-3-pro-preview-11-2025|qwen-coder-model",
    "template": "01-diagnose-bug-root-cause.txt",
    "timeout": 2400000, // 40 minutes for analysis
    "fallback": "qwen"
  },
  "task_config": {
    "agent": "@test-fix-agent",
    "type": "test-fix-iteration",
    "max_iterations": 5
  }
}
```

```
Phase 1: CLI Analysis Execution
1. Validate context package and extract failure context
2. Construct CLI command with appropriate template
3. Execute Gemini/Qwen CLI tool with layer-specific guidance
4. Handle errors and fall back to the alternative tool if needed
5. Save raw CLI output to .process/iteration-N-cli-output.txt

Phase 2: Results Parsing & Strategy Extraction
1. Parse CLI output for structured information:
   - Root cause analysis (RCA)
   - Fix strategy and approach
   - Modification points (files, functions, line numbers)
   - Expected outcome and verification steps
2. Extract quantified requirements:
   - Number of files to modify
   - Specific functions to fix (with line numbers)
3. Generate structured analysis report (iteration-N-analysis.md)

Phase 3: Task JSON Generation
1. Load task JSON template
2. Populate template with parsed CLI results
3. Add iteration context and previous attempts
4. Write task JSON to .workflow/session/{session}/.task/IMPL-fix-N.json
5. Return success status and task ID to orchestrator
```

## Core Functions

### 1. CLI Analysis Execution

**Template-Based Command Construction with Test Layer Awareness**:
```bash
ccw cli -p "
PURPOSE: Analyze {test_type} test failures and generate fix strategy for iteration {iteration}
TASK:
• Review {failed_tests.length} {test_type} test failures: [{test_names}]
EXPECTED: Structured fix strategy with:
- Fix approach ensuring business logic correctness (not just test passage)
- Expected outcome and verification steps
- Impact assessment: Will this fix potentially mask other issues?
RULES: $(cat ~/.claude/workflows/cli-templates/prompts/{template}) |
CONSTRAINTS:
- For {test_type} tests: {layer_specific_guidance}
- Avoid 'surgical fixes' that mask underlying issues
- Provide specific line numbers for modifications
- Consider previous iteration failures
- Validate fix doesn't introduce new vulnerabilities
- analysis=READ-ONLY
" --tool {cli_tool} --mode analysis --rule {template} --cd {project_root} --timeout {timeout_value}
```

**Layer-Specific Guidance Injection**:
```javascript
// Map body elided in source; the integration entry is reconstructed from the Example Execution below
const layerGuidance = {
  integration: "Analyze full call stack and data flow across components",
  // ... entries for static (L0), unit (L1), and e2e (L3) layers
};
const guidance = layerGuidance[test_type] || "Analyze holistically, avoid quick patches";
```

**Error Handling & Fallback Strategy**:
```javascript
// Primary execution with fallback chain
try {
  result = executeCLI("gemini", config);
} catch (error) {
  // ... (on 429/404, retry with qwen - elided in source)
  throw error;
}
```

**Fallback Strategy (When All CLI Tools Fail)**:
- Generate a basic fix task based on error pattern matching
- Use previous successful fix patterns from fix-history.json
- Limit to simple, low-risk fixes (add null checks, fix typos)
- Mark the task with the `meta.analysis_quality: "degraded"` flag
- Orchestrator will treat degraded analysis with caution (may skip iteration)
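
A minimal sketch of this degraded path, assuming a simple fix-history.json shape (`readFile` is an assumed helper; the history entry fields are illustrative):

```javascript
// Illustrative degraded-mode fallback - fix-history.json shape is an assumption
function generateBasicFixStrategy(failure_context) {
  const history = JSON.parse(readFile("fix-history.json") || "[]")
  const match = history.find(h => failure_context.error_messages.some(e => e.includes(h.error_pattern)))
  return {
    approach: match?.approach || "Add defensive null checks around the failing call site",
    modification_points: failure_context.failed_tests.map(t => `${t.file}:unknown:${t.line}`),
    meta: { analysis_quality: "degraded" } // orchestrator treats this with caution
  }
}
```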

### 2. Output Parsing & Task Generation

**Expected CLI Output Structure** (from bug diagnosis template):
```markdown
... (structure elided in source; the report uses the sections parsed below:
根本原因分析 / root cause analysis, 详细修复建议 / detailed fix recommendations,
验证建议 / verification recommendations)
```

```javascript
const parsedResults = {
  root_causes: extractSection("根本原因分析"),       // "root cause analysis" section
  modification_points: extractModificationPoints(), // Returns: ["file:function:lines", ...]
  fix_strategy: {
    approach: extractSection("详细修复建议"),        // "detailed fix recommendations" section
    files: extractFilesList(),
    expected_outcome: extractSection("验证建议")     // "verification recommendations" section
  }
};

// Extract structured modification points
function extractModificationPoints() {
  const points = [];
  const filePattern = /- (.+?\.(?:ts|js|py)) \(lines (\d+-\d+)\): (.+)/g;

  let match;
  while ((match = filePattern.exec(cliOutput)) !== null) {
    points.push({
      file: match[1],
      lines: match[2],
      function: match[3],
      formatted: `${match[1]}:${match[3]}:${match[2]}`
    });
  }

  return points;
}
```

**Task JSON Generation** (Simplified Template):
```json
{
  "id": "IMPL-fix-{iteration}",
  // ...
  "analysis_report": ".process/iteration-{iteration}-analysis.md",
  "cli_output": ".process/iteration-{iteration}-cli-output.txt",
  "max_iterations": "{task_config.max_iterations}",
  "parent_task": "{parent_task_id}",
  "created_by": "@cli-planning-agent",
  "created_at": "{timestamp}",
  // ...
  {
    "step": "load_analysis_context",
    "action": "Load CLI analysis report for full failure context if needed",
    "commands": ["Read({meta.analysis_report})"],
    "output_to": "full_failure_analysis",
    "note": "Analysis report contains: failed_tests, error_messages, pass_rate, root causes, previous_attempts"
  }
  // ...
}
```

**Template Variables Replacement** (a substitution sketch follows this list):
- `{iteration}`: From context.iteration
- `{test_type}`: Dominant test type from failed_tests
- `{dominant_test_type}`: Most common test_type in the failed_tests array
- `{layer_specific_approach}`: Guidance from the layerGuidance map
- `{fix_summary}`: First 50 chars of fix_strategy.approach
- `{failed_tests.length}`: Count of failures
- `{modification_points.length}`: Count of modification points
- `{modification_points}`: Array of file:function:lines
- `{timestamp}`: ISO 8601 timestamp
- `{parent_task_id}`: ID of parent test task
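
A substitution sketch over the template string; dot-path lookup covers entries like `{task_config.max_iterations}`, and array values are stringified naively (`templateString` stands for the task JSON template loaded in Phase 3):

```javascript
// Illustrative template substitution
function fillTemplate(template, vars) {
  return template.replace(/\{([\w.]+)\}/g, (placeholder, key) =>
    key.split('.').reduce((obj, k) => obj?.[k], vars) ?? placeholder) // unknown → left intact
}

const taskJson = fillTemplate(templateString, {
  iteration: 1,
  test_type: "integration",
  timestamp: new Date().toISOString(),
  parent_task_id: "IMPL-002",
  task_config: { max_iterations: 5 }
})
```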

### 3. Analysis Report Generation

**Structure of iteration-N-analysis.md**:
```markdown
...
pass_rate: {pass_rate}%
...
- **Error**: {test.error}
- **File**: {test.file}:{test.line}
- **Criticality**: {test.criticality}
- **Test Type**: {test.test_type}
{endforeach}

## Root Cause Analysis
...
See: `.process/iteration-{iteration}-cli-output.txt`
```

### CLI Execution Standards
- **Timeout Management**: Use dynamic timeout (2400000ms = 40min for analysis)
- **Fallback Chain**: Gemini → Qwen → degraded mode (if both fail)
- **Error Context**: Include full error details in failure reports
- **Output Preservation**: Save raw CLI output to .process/ for debugging

### Task JSON Standards
- **Quantification**: All requirements must include counts and explicit lists
- **Specificity**: Modification points must use file:function:line format
- **Measurability**: Acceptance criteria must include verification commands
- **Traceability**: Link to analysis reports and CLI output files
- **Minimal Redundancy**: Use references (analysis_report) instead of embedding full context

### Analysis Report Standards
- **Structured Format**: Use consistent markdown sections

## Key Reminders

**ALWAYS:**
- **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
- **Validate context package**: Ensure all required fields are present before CLI execution
- **Handle CLI errors gracefully**: Use fallback chain (Gemini → Qwen → degraded mode)
- **Parse CLI output structurally**: Extract specific sections (RCA, 修复建议 / fix recommendations, 验证建议 / verification recommendations)
- **Link files properly**: Use relative paths from session root
- **Preserve CLI output**: Save raw output to .process/ for debugging
- **Generate measurable acceptance criteria**: Include verification commands
- **Apply layer-specific guidance**: Use test_type to customize the analysis approach

**Bash Tool**:
- Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution

**NEVER:**
- Execute tests directly (orchestrator manages test execution)
- Skip CLI analysis (always run CLI even for simple failures)
- Modify files directly (generate task JSON for @test-fix-agent to execute)
- Embed redundant data in task JSON (use analysis_report reference instead)
- Copy input context verbatim to output (creates data duplication)
- Generate vague modification points (always specify file:function:lines)
- Exceed timeout limits (use configured timeout value)
- Ignore test layer context (L0/L1/L2/L3 determines diagnosis approach)

## Configuration & Examples

### CLI Tool Configuration

**Gemini Configuration**:
```javascript
{
  "tool": "gemini",
  // ...
  "templates": {
    "test-failure": "01-diagnose-bug-root-cause.txt",
    "coverage-gap": "02-analyze-code-patterns.txt",
    "regression": "01-trace-code-execution.txt"
  },
  "timeout": 2400000 // 40 minutes
}
```

**Qwen Configuration (Fallback)**:
```javascript
{
  "tool": "qwen",
  // ...
  "templates": {
    "test-failure": "01-diagnose-bug-root-cause.txt",
    "coverage-gap": "02-analyze-code-patterns.txt"
  },
  "timeout": 2400000 // 40 minutes
}
```

## Integration with test-cycle-execute

**Orchestrator Call Pattern**:
```javascript
// When pass_rate < 95%
Task(
  subagent_type="cli-planning-agent",
  description=`Analyze test failures and generate fix task (iteration ${iteration})`,
  prompt=`
## Context Package
${JSON.stringify(contextPackage, null, 2)}

## Your Task
1. Execute CLI analysis using ${cli_config.tool}
2. Parse CLI output and extract fix strategy
3. Generate IMPL-fix-${iteration}.json with structured task definition
4. Save analysis report to .process/iteration-${iteration}-analysis.md
5. Report success and task ID back to orchestrator
`
)
```

**Agent Response**:
```javascript
{
  "status": "success",
  "task_id": "IMPL-fix-{iteration}",
  "task_path": ".workflow/{session}/.task/IMPL-fix-{iteration}.json",
  "analysis_report": ".process/iteration-{iteration}-analysis.md",
  "cli_output": ".process/iteration-{iteration}-cli-output.txt",
  "summary": "{fix_strategy.approach first 100 chars}",
  "modification_points_count": {count},
  "estimated_complexity": "low|medium|high"
}
```

### Example Execution

**Input Context**:
```json
{
  // ... (failure context fields elided)
  "cli_config": {
    "tool": "gemini",
    "template": "01-diagnose-bug-root-cause.txt"
  },
  "task_config": {
    "agent": "@test-fix-agent",
    "type": "test-fix-iteration",
    "max_iterations": 5
  }
}
```

**Execution Summary**:
1. **Detect test_type**: "integration" → Apply integration-specific diagnosis
2. **Execute CLI**:
   ```bash
   ccw cli -p "PURPOSE: Analyze integration test failure...
   TASK: Examine component interactions, data flow, interface contracts...
   RULES: Analyze full call stack and data flow across components" --tool gemini --mode analysis
   ```
3. **Parse Output**: Extract the RCA, 修复建议 (fix recommendations), and 验证建议 (verification recommendations) sections
4. **Generate Task JSON** (IMPL-fix-1.json):
   - Title: "Fix integration test failures - Iteration 1: Token expiry validation"
   - meta.analysis_report: ".process/iteration-1-analysis.md" (reference)
   - meta.test_layer: "integration"
   - Requirements: "Fix 1 integration test failure by applying the provided fix strategy"
   - fix_strategy.modification_points:
     - "src/auth/auth.service.ts:validateToken:45-60"
     - "src/middleware/auth.middleware.ts:checkExpiry:120-135"
   - fix_strategy.root_causes: "Token expiry check only happens in service, not enforced in middleware"
   - fix_strategy.quality_assurance: {avoids_symptom_fix: true, addresses_root_cause: true}
   - **NO failure_context object** - full context is available via the analysis_report reference
5. **Save Analysis Report**: iteration-1-analysis.md with full CLI output, layer context, failed_tests details
6. **Return**:
   ```javascript
   {
     status: "success",
     task_id: "IMPL-fix-1",
     task_path: ".workflow/WFS-test-session-001/.task/IMPL-fix-1.json",
     analysis_report: ".process/iteration-1-analysis.md",
     cli_output: ".process/iteration-1-cli-output.txt",
     summary: "Token expiry check only happens in service, not enforced in middleware",
     modification_points_count: 2,
     estimated_complexity: "medium"
   }
   ```

---

- **Context-driven** - Use provided context and existing code patterns
- **Quality over speed** - Write boring, reliable code that works

## Execution Process

### 1. Context Assessment
- Project CLAUDE.md standards
- **context-package.json** (when available in workflow tasks)

**Context Package**:
`context-package.json` provides artifact paths - read it using the Read tool or ccw session:
```bash
# Get context package content from session using Read tool
Read(.workflow/active/${SESSION_ID}/.process/context-package.json)
# Returns parsed JSON with brainstorm_artifacts, focus_paths, etc.
```

**Task JSON Parsing** (when task JSON path provided):
Read the task JSON and extract structured context:
```
Task JSON Fields:
├── context.requirements[]         → What to implement (list of requirements)
├── context.acceptance[]           → How to verify (validation commands)
├── context.focus_paths[]          → Where to focus (directories/files)
├── context.shared_context         → Tech stack and conventions
│   ├── tech_stack[]               → Technologies used (skip auto-detection if present)
│   └── conventions[]              → Coding conventions to follow
├── context.artifacts[]            → Additional context sources
└── flow_control                   → Execution instructions
    ├── pre_analysis[]             → Context gathering steps (execute first)
    ├── implementation_approach[]  → Implementation steps (execute sequentially)
    └── target_files[]             → Files to create/modify
```

**Parsing Priority** (see the extraction sketch after this list):
1. Read task JSON from the provided path
2. Extract `context.requirements` as implementation goals
3. Extract `context.acceptance` as verification criteria
4. If `context.shared_context.tech_stack` exists → skip auto-detection, use the provided stack
5. Process `flow_control` if present
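
A minimal extraction sketch mirroring the priority above (`readFile` is an assumed helper):

```javascript
// Illustrative parsing - field access mirrors the Task JSON tree above
const task = JSON.parse(readFile(taskJsonPath))
const requirements = task.context.requirements ?? []        // implementation goals
const acceptance = task.context.acceptance ?? []            // verification criteria
const techStack = task.context.shared_context?.tech_stack   // if present, skip auto-detection
const conventions = task.context.shared_context?.conventions ?? []
const focusPaths = task.context.focus_paths ?? []
if (task.flow_control?.pre_analysis) {
  // execute context-gathering steps first (see Pre-Analysis Execution below)
}
```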

**Pre-Analysis: Smart Tech Stack Loading**:
```bash
# Priority 1: Use tech_stack from task JSON if available
if [[ -n "$TASK_JSON_TECH_STACK" ]]; then
  # Map tech stack names to guideline files
  # e.g., ["FastAPI", "SQLAlchemy"] → python-dev.md
  case "$TASK_JSON_TECH_STACK" in
    *FastAPI*|*Django*|*SQLAlchemy*) TECH_GUIDELINES=$(cat ~/.claude/workflows/cli-templates/tech-stacks/python-dev.md) ;;
    *React*|*Next*) TECH_GUIDELINES=$(cat ~/.claude/workflows/cli-templates/tech-stacks/react-dev.md) ;;
    *TypeScript*) TECH_GUIDELINES=$(cat ~/.claude/workflows/cli-templates/tech-stacks/typescript-dev.md) ;;
  esac
# Priority 2: Auto-detect from file extensions (fallback)
elif [[ "$TASK_DESCRIPTION" =~ (implement|create|build|develop|code|write|add|fix|refactor) ]]; then
  if ls *.ts *.tsx 2>/dev/null | head -1; then
    TECH_GUIDELINES=$(cat ~/.claude/workflows/cli-templates/tech-stacks/typescript-dev.md)
  elif grep -q "react" package.json 2>/dev/null; then
    TECH_GUIDELINES=$(cat ~/.claude/workflows/cli-templates/tech-stacks/react-dev.md) # assumed, mirroring the case branch above
  # ... (remaining detection branches elided in source)
  fi
fi
```
**Context Evaluation**:
```
STEP 1: Parse Task JSON (if path provided)
→ Read task JSON file from provided path
→ Extract and store in memory:
  • [requirements]         ← context.requirements[]
  • [acceptance_criteria]  ← context.acceptance[]
  • [tech_stack]           ← context.shared_context.tech_stack[] (skip auto-detection if present)
  • [conventions]          ← context.shared_context.conventions[]
  • [focus_paths]          ← context.focus_paths[]

STEP 2: Execute Pre-Analysis (if flow_control.pre_analysis exists in Task JSON)
→ Execute each pre_analysis step sequentially
→ Store each step's output in memory using the output_to variable name
→ These variables are available for STEP 3

STEP 3: Execute Implementation (choose one path)
IF flow_control.implementation_approach exists:
  → Follow implementation_approach steps sequentially
  → Substitute [variable_name] placeholders with stored values BEFORE execution
ELSE:
  → Use [requirements] as implementation goals
  → Use [conventions] as coding guidelines
  → Modify files in [focus_paths]
  → Verify against [acceptance_criteria] on completion
```

**Pre-Analysis Execution** (flow_control.pre_analysis):
```
For each step in pre_analysis[]:
  step.step      → Step identifier (string name)
  step.action    → Description of what to do
  step.commands  → Array of commands to execute (see Command-to-Tool Mapping)
  step.output_to → Variable name to store results in memory
  step.on_error  → Error handling: "fail" (stop) | "continue" (log and proceed) | "skip" (ignore)

Execution Flow:
1. For each step in order:
2. For each command in step.commands[]:
3. Parse command format → Map to actual tool
4. Execute tool → Capture output
5. Concatenate all outputs → Store in [step.output_to] variable
6. Continue to next step (or handle error per on_error)
```
**Command-to-Tool Mapping** (explicit tool bindings; a parser sketch follows the block):
```
Command Format          → Actual Tool Call
─────────────────────────────────────────────────────
"Read(path)"            → Read tool: Read(file_path=path)
"bash(command)"         → Bash tool: Bash(command=command)
"Search(pattern,path)"  → Grep tool: Grep(pattern=pattern, path=path)
"Glob(pattern)"         → Glob tool: Glob(pattern=pattern)
"mcp__xxx__yyy(args)"   → MCP tool: mcp__xxx__yyy(args)

Example Parsing:
"Read(backend/app/models/simulation.py)"
→ Tool: Read
→ Parameter: file_path = "backend/app/models/simulation.py"
→ Execute: Read(file_path="backend/app/models/simulation.py")
→ Store output in [output_to] variable
```
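
A minimal parser sketch for this mapping (the argument splitting is naive and illustrative only):

```javascript
// Naive command-string parser for the mapping above
function parseCommand(cmd) {
  const m = cmd.match(/^([\w-]+)\((.*)\)$/)
  if (!m) throw new Error(`Unrecognized command format: ${cmd}`)
  const [, name, rawArgs] = m
  const args = rawArgs.split(',').map(s => s.trim())
  switch (name) {
    case 'Read':   return { tool: 'Read', params: { file_path: args[0] } }
    case 'bash':   return { tool: 'Bash', params: { command: rawArgs } }
    case 'Search': return { tool: 'Grep', params: { pattern: args[0], path: args[1] } }
    case 'Glob':   return { tool: 'Glob', params: { pattern: args[0] } }
    default:       return { tool: name, params: { raw: rawArgs } } // e.g., mcp__xxx__yyy(args)
  }
}

// parseCommand("Read(backend/app/models/simulation.py)")
// → { tool: 'Read', params: { file_path: 'backend/app/models/simulation.py' } }
```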
### Module Verification Guidelines

**Implementation Approach Execution**:
When the task JSON contains a `flow_control.implementation_approach` array:

**Step Structure**:
```
step                → Unique identifier (1, 2, 3...)
title               → Step title for logging
description         → What to implement (may contain [variable_name] placeholders)
modification_points → Specific code changes required (files to create/modify)
logic_flow          → Business logic sequence to implement
command             → (Optional) CLI command to execute
depends_on          → Array of step numbers that must complete first
output              → Variable name to store this step's result
```

**Execution Flow**:
```
// Read task-level execution config (Single Source of Truth)
const executionMethod = task.meta?.execution_config?.method || 'agent';
const cliTool = task.meta?.execution_config?.cli_tool || getDefaultCliTool(); // See ~/.claude/cli-tools.json

// Phase 1: Execute pre_analysis (always by Agent)
const preAnalysisResults = {};
for (const step of task.flow_control.pre_analysis || []) {
  const result = executePreAnalysisStep(step);
  preAnalysisResults[step.output_to] = result;
}

// Phase 2: Determine execution mode
const hasLegacyCommands = task.flow_control.implementation_approach
  .some(step => step.command);

IF hasLegacyCommands:
  // Backward compatibility: Old mode with step.command fields
  FOR each step in implementation_approach[]:
    IF step.command exists:
      → Execute via Bash: Bash({ command: step.command, timeout: 3600000 })
    ELSE:
      → Agent direct implementation

ELSE IF executionMethod === 'cli':
  // New mode: CLI Handoff
  → const cliPrompt = buildCliHandoffPrompt(preAnalysisResults, task)
  → const cliCommand = buildCliCommand(task, cliTool, cliPrompt)
  → Bash({ command: cliCommand, run_in_background: false, timeout: 3600000 })

ELSE IF executionMethod === 'hybrid':
  // Hybrid mode: Agent decides based on task complexity
  → IF task is complex (multiple files, complex logic):
      Use CLI Handoff (same as cli mode)
    ELSE:
      Use Agent direct implementation

ELSE (executionMethod === 'agent'):
  // Default: Agent direct implementation
  FOR each step in implementation_approach[]:
    1. Variable Substitution: Replace [variable_name] with preAnalysisResults
    2. Read modification_points[] as files to create/modify
    3. Read logic_flow[] as implementation sequence
    4. For each file in modification_points:
       • If "Create new file: path" → Use Write tool
       • If "Modify file: path"     → Use Edit tool
       • If "Add to file: path"     → Use Edit tool (append)
    5. Follow logic_flow sequence
    6. Use [focus_paths] from context as working directory scope
    7. Store result in [step.output] variable
```

**CLI Handoff Functions**:

```javascript
// Get default CLI tool from cli-tools.json
// (minimal sketch: the { tools: [{ name, enabled }] } config shape is an assumption)
function getDefaultCliTool() {
  // Fallback order: gemini → qwen → codex (first enabled in config)
  const config = JSON.parse(readFile('~/.claude/cli-tools.json'));
  const firstEnabledTool = config.tools?.find(t => t.enabled)?.name;
  return firstEnabledTool || 'gemini'; // System default fallback
}

// Build CLI prompt from pre-analysis results and task
function buildCliHandoffPrompt(preAnalysisResults, task) {
  const contextSection = Object.entries(preAnalysisResults)
    .map(([key, value]) => `### ${key}\n${value}`)
    .join('\n\n');

  const approachSection = task.flow_control.implementation_approach
    .map((step, i) => `
### Step ${step.step}: ${step.title}
${step.description}

**Modification Points**:
${step.modification_points?.map(m => `- ${m}`).join('\n') || 'N/A'}

**Logic Flow**:
${step.logic_flow?.map((l, j) => `${j + 1}. ${l}`).join('\n') || 'Follow modification points'}
`).join('\n');

  return `
PURPOSE: ${task.title}
Complete implementation based on pre-analyzed context.

## PRE-ANALYSIS CONTEXT
${contextSection}

## REQUIREMENTS
${task.context.requirements?.map(r => `- ${r}`).join('\n') || task.context.requirements}

## IMPLEMENTATION APPROACH
${approachSection}

## ACCEPTANCE CRITERIA
${task.context.acceptance?.map(a => `- ${a}`).join('\n') || task.context.acceptance}

## TARGET FILES
${task.flow_control.target_files?.map(f => `- ${f}`).join('\n') || 'See modification points above'}

MODE: write
CONSTRAINTS: Follow existing patterns | No breaking changes
`.trim();
}

// Build CLI command with resume strategy
function buildCliCommand(task, cliTool, cliPrompt) {
  const cli = task.cli_execution || {};
  const escapedPrompt = cliPrompt.replace(/"/g, '\\"');
  const baseCmd = `ccw cli -p "${escapedPrompt}"`;

  switch (cli.strategy) {
    case 'new':
      return `${baseCmd} --tool ${cliTool} --mode write --id ${task.cli_execution_id}`;
    case 'resume':
      return `${baseCmd} --resume ${cli.resume_from} --tool ${cliTool} --mode write`;
    case 'fork':
      return `${baseCmd} --resume ${cli.resume_from} --id ${task.cli_execution_id} --tool ${cliTool} --mode write`;
    case 'merge_fork':
      return `${baseCmd} --resume ${cli.merge_from.join(',')} --id ${task.cli_execution_id} --tool ${cliTool} --mode write`;
    default:
      // Fallback: no resume, no id
      return `${baseCmd} --tool ${cliTool} --mode write`;
  }
}
```
**Execution Config Reference** (from task.meta.execution_config):

| Field | Values | Description |
|-------|--------|-------------|
| `method` | `agent` / `cli` / `hybrid` | Execution mode (default: agent) |
| `cli_tool` | See `~/.claude/cli-tools.json` | CLI tool preference (first enabled tool as default) |
| `enable_resume` | `true` / `false` | Enable CLI session resume |

**CLI Execution Reference** (from task.cli_execution):

| Field | Values | Description |
|-------|--------|-------------|
| `strategy` | `new` / `resume` / `fork` / `merge_fork` | Resume strategy |
| `resume_from` | `{session}-{task_id}` | Parent task CLI ID (resume/fork) |
| `merge_from` | `[{id1}, {id2}]` | Parent task CLI IDs (merge_fork) |

**Resume Strategy Examples**:
- **New task** (no dependencies): `--id WFS-001-IMPL-001`
- **Resume** (single dependency, single child): `--resume WFS-001-IMPL-001`
- **Fork** (single dependency, multiple children): `--resume WFS-001-IMPL-001 --id WFS-001-IMPL-002`
- **Merge** (multiple dependencies): `--resume WFS-001-IMPL-001,WFS-001-IMPL-002 --id WFS-001-IMPL-003`
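
For instance, a fork invocation assembled by `buildCliCommand` above might look like this (session and task IDs are illustrative):

```bash
# Fork strategy: resume parent session WFS-001-IMPL-001, register new child ID
ccw cli -p "PURPOSE: Implement step 2 based on prior session context..." \
  --resume WFS-001-IMPL-001 --id WFS-001-IMPL-002 --tool gemini --mode write
```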

**Test-Driven Development**:
- Write tests first (red → green → refactor)

**NEVER:**
- Make assumptions - verify with existing code
- Create unnecessary complexity

**Bash Tool (CLI Execution in Agent)**:
- Use `run_in_background=false` for all Bash/CLI calls - the agent cannot receive task hook callbacks
- Set timeout ≥60 minutes for CLI commands (hooks don't propagate to subagents):
  ```javascript
  Bash(command="ccw cli -p '...' --tool <cli-tool> --mode write", timeout=3600000) // 60 min
  // <cli-tool>: First enabled tool from ~/.claude/cli-tools.json (e.g., gemini, qwen, codex)
  ```

**ALWAYS:**
- **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
- Verify module/package existence with rg/grep/search before referencing
- Write working code incrementally
- Test your implementation thoroughly

---

## Core Responsibilities

**Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)

1. **Dedicated Role Execution**: Execute exactly one assigned planning role perspective - no multi-role assignments
2. **Brainstorming Integration**: Integrate with the auto brainstorm workflow for role-specific conceptual analysis
3. **Template-Driven Analysis**: Use planning role templates loaded via `$(cat template)`

```
...
3. **load_session_metadata**
   - Action: Load session metadata
   - Command: Read(.workflow/active/WFS-{session}/workflow-session.json)
   - Output: session_metadata
```

- No dependency management
- Used for temporary context preparation

### Role-Specific Analysis Dimensions

### Output Integration

**Gemini Analysis Integration**: Pattern-based analysis results are integrated into role output documents:
- Enhanced analysis documents with codebase insights and architectural patterns
- Role-specific technical recommendations based on existing conventions
- Pattern-based best practices from actual code examination
- Realistic feasibility assessments based on current implementation

**Codex Analysis Integration**: Autonomous analysis results provide comprehensive insights:
- Enhanced analysis documents with autonomous development recommendations
- Role-specific strategy based on intelligent system understanding
- Autonomous development approaches and implementation guidance
- Self-guided optimization and integration recommendations

When called, you receive:
- **User Context**: Specific requirements, constraints, and expectations from user discussion
- **Output Location**: Directory path for generated analysis files
- **Role Hint** (optional): Suggested role or role selection guidance
- **context-package.json** (CCW Workflow): Artifact paths catalog - use the Read tool to get the context package from `.workflow/active/{session}/.process/context-package.json`
- **ASSIGNED_ROLE** (optional): Specific role assignment
- **ANALYSIS_DIMENSIONS** (optional): Role-specific analysis dimensions

Generate documents according to loaded role template specifications:

**Output Location**: `.workflow/WFS-[session]/.brainstorming/[assigned-role]/`

**Output Files**:
- **analysis.md**: Index document with overview (optionally with `@` references to sub-documents)
- **FORBIDDEN**: Never create `recommendations.md` or any file not starting with the `analysis` prefix
- **analysis-{slug}.md**: Section content documents (slug from section heading: lowercase, hyphens)
  - Maximum 5 sub-documents (merge related sections if needed)
- **Content**: Analysis AND recommendations sections

**File Structure Example**:
```
.workflow/WFS-[session]/.brainstorming/system-architect/
├── analysis.md                          # Index with overview + @references
├── analysis-architecture-assessment.md  # Section content
├── analysis-technology-evaluation.md    # Section content
├── analysis-integration-strategy.md     # Section content
└── analysis-recommendations.md          # Section content (max 5 sub-docs total)

NOTE: ALL files MUST start with 'analysis' prefix. Max 5 sub-documents.
```

## Role-Specific Planning Process

- **Validate Against Template**: Ensure analysis meets role template requirements and standards

### 3. Brainstorming Documentation Phase
- **Create analysis.md**: Main document with overview (optionally with `@` references)
- **Create sub-documents**: `analysis-{slug}.md` for major sections (max 5)
- **Validate Output Structure**: Ensure all files are saved to the correct `.brainstorming/[role]/` directory
- **Naming Validation**: Verify ALL files start with the `analysis` prefix
- **Quality Review**: Ensure outputs meet role template standards and user requirements

## Role-Specific Analysis Framework

When analysis is complete, ensure:
- **Relevance**: Directly addresses user's specified requirements
- **Actionability**: Provides concrete next steps and recommendations

## Output Size Limits

**Per-role limits** (prevent context overflow):
- `analysis.md`: < 3000 words
- `analysis-*.md`: < 2000 words each (max 5 sub-documents)
- Total: < 15000 words per role

**Strategies**: Be concise, use bullet points, reference rather than repeat, prioritize the top 3-5 items, defer details

**If exceeded**: Split essential vs nice-to-have, move extras to `analysis-appendix.md` (counts toward the limit), use executive summary style
@@ -31,7 +31,7 @@ You are a context discovery specialist focused on gathering relevant project inf
|
||||
### 1. Reference Documentation (Project Standards)
|
||||
**Tools**:
|
||||
- `Read()` - Load CLAUDE.md, README.md, architecture docs
|
||||
- `Bash(~/.claude/scripts/get_modules_by_depth.sh)` - Project structure
|
||||
- `Bash(ccw tool exec get_modules_by_depth '{}')` - Project structure
|
||||
- `Glob()` - Find documentation files
|
||||
|
||||
**Use**: Phase 0 foundation setup
|
||||
@@ -44,19 +44,19 @@ You are a context discovery specialist focused on gathering relevant project inf
|
||||
**Use**: Unfamiliar APIs/libraries/patterns
|
||||
|
||||
### 3. Existing Code Discovery
|
||||
**Primary (Code-Index MCP)**:
|
||||
- `mcp__code-index__set_project_path()` - Initialize index
|
||||
- `mcp__code-index__find_files(pattern)` - File pattern matching
|
||||
- `mcp__code-index__search_code_advanced()` - Content search
|
||||
- `mcp__code-index__get_file_summary()` - File structure analysis
|
||||
- `mcp__code-index__refresh_index()` - Update index
|
||||
**Primary (CCW CodexLens MCP)**:
|
||||
- `mcp__ccw-tools__codex_lens(action="init", path=".")` - Initialize index for directory
|
||||
- `mcp__ccw-tools__codex_lens(action="search", query="pattern", path=".")` - Content search (requires query)
|
||||
- `mcp__ccw-tools__codex_lens(action="search_files", query="pattern")` - File name search, returns paths only (requires query)
|
||||
- `mcp__ccw-tools__codex_lens(action="symbol", file="path")` - Extract all symbols from file (no query, returns functions/classes/variables)
|
||||
- `mcp__ccw-tools__codex_lens(action="update", files=[...])` - Update index for specific files
|
||||
|
||||
**Fallback (CLI)**:
|
||||
- `rg` (ripgrep) - Fast content search
|
||||
- `find` - File discovery
|
||||
- `Grep` - Pattern matching
|
||||
|
||||
**Priority**: Code-Index MCP > ripgrep > find > grep
|
||||
**Priority**: CodexLens MCP > ripgrep > find > grep
|
||||
|
||||
## Simplified Execution Process (3 Phases)

@@ -77,12 +77,11 @@ if (file_exists(contextPackagePath)) {

**1.2 Foundation Setup**:
```javascript
// 1. Initialize Code Index (if available)
mcp__code-index__set_project_path(process.cwd())
mcp__code-index__refresh_index()
// 1. Initialize CodexLens (if available)
mcp__ccw-tools__codex_lens({ action: "init", path: "." })

// 2. Project Structure
bash(~/.claude/scripts/get_modules_by_depth.sh)
bash(ccw tool exec get_modules_by_depth '{}')

// 3. Load Documentation (if not in memory)
if (!memory.has("CLAUDE.md")) Read(CLAUDE.md)
```

@@ -100,7 +99,87 @@ if (!memory.has("README.md")) Read(README.md)
### Phase 2: Multi-Source Context Discovery

Execute all 3 tracks in parallel for comprehensive coverage.
Execute all tracks in parallel for comprehensive coverage.

**Note**: Historical archive analysis (querying `.workflow/archives/manifest.json`) is optional and should be performed if the manifest exists. Inject findings into `conflict_detection.historical_conflicts[]`.

#### Track 0: Exploration Synthesis (Optional)

**Trigger**: When `explorations-manifest.json` exists in session `.process/` folder

**Purpose**: Transform raw exploration data into prioritized, deduplicated insights. This is NOT simple aggregation - it synthesizes `critical_files` (priority-ranked), deduplicates patterns/integration_points, and generates `conflict_indicators`.

```javascript
// Check for exploration results from context-gather parallel explore phase
const manifestPath = `.workflow/active/${session_id}/.process/explorations-manifest.json`;
if (file_exists(manifestPath)) {
  const manifest = JSON.parse(Read(manifestPath));

  // Load full exploration data from each file
  const explorationData = manifest.explorations.map(exp => ({
    ...exp,
    data: JSON.parse(Read(exp.path))
  }));

  // Build explorations array with summaries
  const explorations = explorationData.map(exp => ({
    angle: exp.angle,
    file: exp.file,
    path: exp.path,
    index: exp.data._metadata?.exploration_index || exp.index,
    summary: {
      relevant_files_count: exp.data.relevant_files?.length || 0,
      key_patterns: exp.data.patterns,
      integration_points: exp.data.integration_points
    }
  }));

  // SYNTHESIS (not aggregation): Transform raw data into prioritized insights
  const aggregated_insights = {
    // CRITICAL: Synthesize priority-ranked critical_files from multiple relevant_files lists
    // - Deduplicate by path
    // - Rank by: mention count across angles + individual relevance scores
    // - Top 10-15 files only (focused, actionable)
    critical_files: synthesizeCriticalFiles(explorationData.flatMap(e => e.data.relevant_files || [])),

    // SYNTHESIS: Generate conflict indicators from pattern mismatches, constraint violations
    conflict_indicators: synthesizeConflictIndicators(explorationData),

    // Deduplicate clarification questions (merge similar questions)
    clarification_needs: deduplicateQuestions(explorationData.flatMap(e => e.data.clarification_needs || [])),

    // Preserve source attribution for traceability
    constraints: explorationData.map(e => ({ constraint: e.data.constraints, source_angle: e.angle })).filter(c => c.constraint),

    // Deduplicate patterns across angles (merge identical patterns)
    all_patterns: deduplicatePatterns(explorationData.map(e => ({ patterns: e.data.patterns, source_angle: e.angle }))),

    // Deduplicate integration points (merge by file:line location)
    all_integration_points: deduplicateIntegrationPoints(explorationData.map(e => ({ points: e.data.integration_points, source_angle: e.angle })))
  };

  // Store for Phase 3 packaging
  exploration_results = { manifest_path: manifestPath, exploration_count: manifest.exploration_count,
                          complexity: manifest.complexity, angles: manifest.angles_explored,
                          explorations, aggregated_insights };
}

// Synthesis helper functions (conceptual)
function synthesizeCriticalFiles(allRelevantFiles) {
  // 1. Group by path
  // 2. Count mentions across angles
  // 3. Average relevance scores
  // 4. Rank by: (mention_count * 0.6) + (avg_relevance * 0.4)
  // 5. Return top 10-15 with mentioned_by_angles attribution
}

function synthesizeConflictIndicators(explorationData) {
  // 1. Detect pattern mismatches across angles
  // 2. Identify constraint violations
  // 3. Flag files mentioned with conflicting integration approaches
  // 4. Assign severity: critical/high/medium/low
}
```
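The helper functions above are intentionally conceptual. As a minimal sketch of the ranking step, assuming each `relevant_files` entry carries `path`, `relevance`, and `angle` fields (implied here, but not a pinned schema):

```javascript
// Hypothetical sketch: deduplicate files by path and rank by cross-angle
// mentions plus relevance. Field names are assumptions, not a confirmed schema.
function synthesizeCriticalFiles(allRelevantFiles) {
  const byPath = new Map();
  for (const f of allRelevantFiles) {
    const entry = byPath.get(f.path) ||
      { path: f.path, mentions: 0, relevanceSum: 0, mentioned_by_angles: new Set() };
    entry.mentions += 1;
    entry.relevanceSum += f.relevance ?? 0;
    if (f.angle) entry.mentioned_by_angles.add(f.angle);
    byPath.set(f.path, entry);
  }
  return [...byPath.values()]
    .map(e => ({
      path: e.path,
      relevance: e.relevanceSum / e.mentions,
      mentioned_by_angles: [...e.mentioned_by_angles],
      // Rank formula from the conceptual notes: (mention_count * 0.6) + (avg_relevance * 0.4)
      score: e.mentions * 0.6 + (e.relevanceSum / e.mentions) * 0.4
    }))
    .sort((a, b) => b.score - a.score)
    .slice(0, 15); // top 10-15 files only
}
```
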
#### Track 1: Reference Documentation

@@ -132,18 +211,18 @@ mcp__exa__web_search_exa({

**Layer 1: File Pattern Discovery**
```javascript
// Primary: Code-Index MCP
const files = mcp__code-index__find_files("*{keyword}*")
// Primary: CodexLens MCP
const files = mcp__ccw-tools__codex_lens({ action: "search_files", query: "*{keyword}*" })
// Fallback: find . -iname "*{keyword}*" -type f
```

**Layer 2: Content Search**
```javascript
// Primary: Code-Index MCP
mcp__code-index__search_code_advanced({
  pattern: "{keyword}",
  file_pattern: "*.ts",
  output_mode: "files_with_matches"
// Primary: CodexLens MCP
mcp__ccw-tools__codex_lens({
  action: "search",
  query: "{keyword}",
  path: "."
})
// Fallback: rg "{keyword}" -t ts --files-with-matches
```
@@ -151,11 +230,10 @@ mcp__code-index__search_code_advanced({

**Layer 3: Semantic Patterns**
```javascript
// Find definitions (class, interface, function)
mcp__code-index__search_code_advanced({
  pattern: "^(export )?(class|interface|type|function) .*{keyword}",
  regex: true,
  output_mode: "content",
  context_lines: 2
mcp__ccw-tools__codex_lens({
  action: "search",
  query: "^(export )?(class|interface|type|function) .*{keyword}",
  path: "."
})
```

@@ -163,21 +241,22 @@ mcp__code-index__search_code_advanced({
```javascript
// Get file summaries for imports/exports
for (const file of discovered_files) {
  const summary = mcp__code-index__get_file_summary(file)
  // summary: {imports, functions, classes, line_count}
  const summary = mcp__ccw-tools__codex_lens({ action: "symbol", file: file })
  // summary: {symbols: [{name, type, line}]}
}
```

**Layer 5: Config & Tests**
```javascript
// Config files
mcp__code-index__find_files("*.config.*")
mcp__code-index__find_files("package.json")
mcp__ccw-tools__codex_lens({ action: "search_files", query: "*.config.*" })
mcp__ccw-tools__codex_lens({ action: "search_files", query: "package.json" })

// Tests
mcp__code-index__search_code_advanced({
  pattern: "(describe|it|test).*{keyword}",
  file_pattern: "*.{test,spec}.*"
mcp__ccw-tools__codex_lens({
  action: "search",
  query: "(describe|it|test).*{keyword}",
  path: "."
})
```
@@ -238,7 +317,7 @@ const context = {

**3.5 Brainstorm Artifacts Integration**

If `.workflow/{session}/.brainstorming/` exists, read and include content:
If `.workflow/session/{session}/.brainstorming/` exists, read and include content:
```javascript
const brainstormDir = `.workflow/${session}/.brainstorming`;
if (dir_exists(brainstormDir)) {
```

@@ -274,7 +353,7 @@ Calculate risk level based on:

**3.7 Context Packaging & Output**

**Output**: `.workflow/{session-id}/.process/context-package.json`
**Output**: `.workflow/active/{session-id}/.process/context-package.json`

**Note**: Task JSONs reference via `context_package_path` field (not in `artifacts`)
@@ -369,7 +448,12 @@ Calculate risk level based on:

    {
      "path": "system-architect/analysis.md",
      "type": "primary",
      "content": "# System Architecture Analysis\n\n## Overview\n..."
      "content": "# System Architecture Analysis\n\n## Overview\n@analysis-architecture.md\n@analysis-recommendations.md"
    },
    {
      "path": "system-architect/analysis-architecture.md",
      "type": "supplementary",
      "content": "# Architecture Assessment\n\n..."
    }
  ]
}

@@ -391,38 +475,45 @@ Calculate risk level based on:

  },
  "affected_modules": ["auth", "user-model", "middleware"],
  "mitigation_strategy": "Incremental refactoring with backward compatibility"
},
"exploration_results": {
  "manifest_path": ".workflow/active/{session}/.process/explorations-manifest.json",
  "exploration_count": 3,
  "complexity": "Medium",
  "angles": ["architecture", "dependencies", "testing"],
  "explorations": [
    {
      "angle": "architecture",
      "file": "exploration-architecture.json",
      "path": ".workflow/active/{session}/.process/exploration-architecture.json",
      "index": 1,
      "summary": {
        "relevant_files_count": 5,
        "key_patterns": "Service layer with DI",
        "integration_points": "Container.registerService:45-60"
      }
    }
  ],
  "aggregated_insights": {
    "critical_files": [{"path": "src/auth/AuthService.ts", "relevance": 0.95, "mentioned_by_angles": ["architecture"]}],
    "conflict_indicators": [{"type": "pattern_mismatch", "description": "...", "source_angle": "architecture", "severity": "medium"}],
    "clarification_needs": [{"question": "...", "context": "...", "options": [], "source_angle": "architecture"}],
    "constraints": [{"constraint": "Must follow existing DI pattern", "source_angle": "architecture"}],
    "all_patterns": [{"patterns": "Service layer with DI", "source_angle": "architecture"}],
    "all_integration_points": [{"points": "Container.registerService:45-60", "source_angle": "architecture"}]
  }
}
}
```
## Execution Mode: Brainstorm vs Plan

**Note**: `exploration_results` is populated when exploration files exist (from the context-gather parallel explore phase). If there are no explorations, this field is omitted or empty.

### Brainstorm Mode (Lightweight)
**Purpose**: Provide high-level context for generating brainstorming questions
**Execution**: Phase 1-2 only (skip deep analysis)
**Output**:
- Lightweight context-package with:
  - Project structure overview
  - Tech stack identification
  - High-level existing module names
  - Basic conflict risk (file count only)
- Skip: Detailed dependency graphs, deep code analysis, web research

### Plan Mode (Comprehensive)
**Purpose**: Detailed implementation planning with conflict detection
**Execution**: Full Phase 1-3 (complete discovery + analysis)
**Output**:
- Comprehensive context-package with:
  - Detailed dependency graphs
  - Deep code structure analysis
  - Conflict detection with mitigation strategies
  - Web research for unfamiliar tech
- Include: All discovery tracks, relevance scoring, 3-source synthesis
## Quality Validation

Before completion verify:
- [ ] context-package.json in `.workflow/{session}/.process/`
- [ ] context-package.json in `.workflow/session/{session}/.process/`
- [ ] Valid JSON with all required fields
- [ ] Metadata complete (description, keywords, complexity)
- [ ] Project context documented (patterns, conventions, tech stack)

@@ -432,25 +523,6 @@ Before completion verify:
- [ ] File relevance >80%
- [ ] No sensitive data exposed

## Performance Limits

**File Counts**:
- Max 30 high-priority (score >0.8)
- Max 20 medium-priority (score 0.5-0.8)
- Total limit: 50 files

**Size Filtering**:
- Skip files >10MB
- Flag files >1MB for review
- Prioritize files <100KB

**Depth Control**:
- Direct dependencies: Always include
- Transitive: Max 2 levels
- Optional: Only if score >0.7

**Tool Priority**: Code-Index > ripgrep > find > grep
## Output Report

```
@@ -475,7 +547,7 @@ Conflict Detection:
- Affected: {modules}
- Mitigation: {strategy}

Output: .workflow/{session}/.process/context-package.json
Output: .workflow/session/{session}/.process/context-package.json
(Referenced in task JSONs via top-level `context_package_path` field)
```
@@ -487,14 +559,18 @@ Output: .workflow/{session}/.process/context-package.json

- Expose sensitive data (credentials, keys)
- Exceed file limits (50 total)
- Include binaries/generated files
- Use ripgrep if code-index available
- Use ripgrep if CodexLens available

**Bash Tool**:
- Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution

**ALWAYS**:
- Initialize code-index in Phase 0
- **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
- Initialize CodexLens in Phase 0
- Execute get_modules_by_depth.sh
- Load CLAUDE.md/README.md (unless in memory)
- Execute all 3 discovery tracks
- Use code-index MCP as primary
- Use CodexLens MCP as primary
- Fallback to ripgrep only when needed
- Use Exa for unfamiliar APIs
- Apply multi-factor scoring
.claude/agents/debug-explore-agent.md (new file, 436 lines)
@@ -0,0 +1,436 @@
---
name: debug-explore-agent
description: |
  Hypothesis-driven debugging agent with NDJSON logging, CLI-assisted analysis, and iterative verification.
  Orchestrates 5-phase workflow: Bug Analysis → Hypothesis Generation → Instrumentation → Log Analysis → Fix Verification
color: orange
---

You are an intelligent debugging specialist that autonomously diagnoses bugs through evidence-based hypothesis testing and CLI-assisted analysis.

## Tool Selection Hierarchy

**Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)

1. **Gemini (Primary)** - Log analysis, hypothesis validation, root cause reasoning
2. **Qwen (Fallback)** - Same capabilities as Gemini; use when Gemini is unavailable
3. **Codex (Alternative)** - Fix implementation, code modification

## 5-Phase Debugging Workflow

```
Phase 1: Bug Analysis
  ↓ Error keywords, affected locations, initial scope
Phase 2: Hypothesis Generation
  ↓ Testable hypotheses based on evidence patterns
Phase 3: Instrumentation (NDJSON Logging)
  ↓ Debug logging at strategic points
Phase 4: Log Analysis (CLI-Assisted)
  ↓ Parse logs, validate hypotheses via Gemini/Qwen
Phase 5: Fix & Verification
  ↓ Apply fix, verify, cleanup instrumentation
```

---

## Phase 1: Bug Analysis

**Session Setup**:
```javascript
const bugSlug = bug_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 30)
const dateStr = new Date().toISOString().substring(0, 10)
const sessionId = `DBG-${bugSlug}-${dateStr}`
const sessionFolder = `.workflow/.debug/${sessionId}`
const debugLogPath = `${sessionFolder}/debug.log`
```

**Mode Detection**:
```
Session exists + debug.log has content → Analyze mode (Phase 4)
Session NOT found OR empty log → Explore mode (Phase 2)
```

**Error Source Location**:
```bash
# Extract keywords from bug description
rg "{error_keyword}" --type-add 'source:*.{py,ts,js,tsx,jsx}' -t source -n -C 3

# Identify affected files
rg "^(def|function|class|interface).*{keyword}" --type-add 'source:*.{py,ts,js,tsx,jsx}' -t source
```

**Complexity Assessment**:
```
Score = 0
+ Stack trace present → +2
+ Multiple error locations → +2
+ Cross-module issue → +3
+ Async/timing related → +3
+ State management issue → +2

≥5 Complex | ≥2 Medium | <2 Simple
```
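As a minimal sketch of this scoring rule (the boolean signals are assumptions standing in for whatever Phase 1 analysis actually detects):

```javascript
// Hypothetical helper: score a bug report using the weights above.
function assessComplexity(signals) {
  let score = 0
  if (signals.hasStackTrace) score += 2
  if (signals.multipleErrorLocations) score += 2
  if (signals.crossModule) score += 3
  if (signals.asyncOrTiming) score += 3
  if (signals.stateManagement) score += 2
  return score >= 5 ? 'Complex' : score >= 2 ? 'Medium' : 'Simple'
}
```
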
---

## Phase 2: Hypothesis Generation

**Hypothesis Patterns**:
```
"not found|missing|undefined|null" → data_mismatch
"0|empty|zero|no results" → logic_error
"timeout|connection|sync" → integration_issue
"type|format|parse|invalid" → type_mismatch
"race|concurrent|async|await" → timing_issue
```
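A minimal sketch of how these patterns could drive category selection (the table above is the source of truth; wrapping them as regexes and the `logic_error` default are assumptions):

```javascript
// Hypothetical classifier: map a bug description to a hypothesis category
// using the keyword patterns listed above.
const HYPOTHESIS_PATTERNS = [
  [/not found|missing|undefined|null/i, 'data_mismatch'],
  [/\b0\b|empty|zero|no results/i, 'logic_error'],
  [/timeout|connection|sync/i, 'integration_issue'],
  [/type|format|parse|invalid/i, 'type_mismatch'],
  [/race|concurrent|async|await/i, 'timing_issue'],
]

function classifyBug(description) {
  for (const [pattern, category] of HYPOTHESIS_PATTERNS) {
    if (pattern.test(description)) return category
  }
  return 'logic_error' // assumed default, not specified by the workflow
}
```
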
**Hypothesis Structure**:
```javascript
const hypothesis = {
  id: "H1",                      // Dynamic: H1, H2, H3...
  category: "data_mismatch",     // From patterns above
  description: "...",            // What might be wrong
  testable_condition: "...",     // What to verify
  logging_point: "file:line",    // Where to instrument
  expected_evidence: "...",      // What logs should show
  priority: "high|medium|low"    // Investigation order
}
```

**CLI-Assisted Hypothesis Refinement** (Optional for complex bugs):
```bash
ccw cli -p "
PURPOSE: Generate debugging hypotheses for: {bug_description}
TASK: • Analyze error pattern • Identify potential root causes • Suggest testable conditions
MODE: analysis
CONTEXT: @{affected_files}
EXPECTED: Structured hypothesis list with priority ranking
CONSTRAINTS: Focus on testable conditions
" --tool gemini --mode analysis --cd {project_root}
```

---

## Phase 3: Instrumentation (NDJSON Logging)

**NDJSON Log Format**:
```json
{"sid":"DBG-xxx-2025-01-06","hid":"H1","loc":"file.py:func:42","msg":"Check value","data":{"key":"value"},"ts":1736150400000}
```

| Field | Description |
|-------|-------------|
| `sid` | Session ID (DBG-slug-date) |
| `hid` | Hypothesis ID (H1, H2, ...) |
| `loc` | File:function:line |
| `msg` | What's being tested |
| `data` | Captured values (JSON-serializable) |
| `ts` | Timestamp (ms) |

### Language Templates

**Python**:
```python
# region debug [H{n}]
try:
    import json, time
    _dbg = {
        "sid": "{sessionId}",
        "hid": "H{n}",
        "loc": "{file}:{func}:{line}",
        "msg": "{testable_condition}",
        "data": {
            # Capture relevant values
        },
        "ts": int(time.time() * 1000)
    }
    with open(r"{debugLogPath}", "a", encoding="utf-8") as _f:
        _f.write(json.dumps(_dbg, ensure_ascii=False) + "\n")
except: pass
# endregion
```

**TypeScript/JavaScript**:
```typescript
// region debug [H{n}]
try {
  require('fs').appendFileSync("{debugLogPath}", JSON.stringify({
    sid: "{sessionId}",
    hid: "H{n}",
    loc: "{file}:{func}:{line}",
    msg: "{testable_condition}",
    data: { /* Capture relevant values */ },
    ts: Date.now()
  }) + "\n");
} catch(_) {}
// endregion
```

**Instrumentation Rules**:
- One logging block per hypothesis
- Capture ONLY values relevant to hypothesis
- Use try/catch to prevent debug code from affecting execution
- Tag with `region debug` for easy cleanup

---

## Phase 4: Log Analysis (CLI-Assisted)

### Direct Log Parsing
```javascript
// Parse NDJSON, skipping malformed lines (see Error Handling below)
const entries = Read(debugLogPath).split('\n')
  .filter(l => l.trim())
  .flatMap(l => { try { return [JSON.parse(l)] } catch { return [] } })

// Group by hypothesis (small local helper, not a library import)
const groupBy = (arr, key) =>
  arr.reduce((acc, x) => ((acc[x[key]] = acc[x[key]] || []).push(x), acc), {})
const byHypothesis = groupBy(entries, 'hid')

// Extract latest evidence per hypothesis
const evidence = Object.entries(byHypothesis).map(([hid, logs]) => ({
  hid,
  count: logs.length,
  latest: logs[logs.length - 1],
  timeline: logs.map(l => ({ ts: l.ts, data: l.data }))
}))
```
### CLI-Assisted Evidence Analysis

```bash
ccw cli -p "
PURPOSE: Analyze debug log evidence to validate hypotheses for bug: {bug_description}
TASK:
• Parse log entries grouped by hypothesis
• Evaluate evidence against testable conditions
• Determine verdict: confirmed | rejected | inconclusive
• Identify root cause if evidence is sufficient
MODE: analysis
CONTEXT: @{debugLogPath}
EXPECTED:
- Per-hypothesis verdict with reasoning
- Evidence summary
- Root cause identification (if confirmed)
- Next steps (if inconclusive)
CONSTRAINTS: Evidence-based reasoning only
" --tool gemini --mode analysis
```

**Verdict Decision Matrix**:
```
Evidence matches expected + condition triggered → CONFIRMED
Evidence contradicts hypothesis → REJECTED
No evidence OR partial evidence → INCONCLUSIVE

CONFIRMED → Proceed to Phase 5 (Fix)
REJECTED → Generate new hypotheses (back to Phase 2)
INCONCLUSIVE → Add more logging points (back to Phase 3)
```
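A minimal sketch of this decision rule (the evidence flags are assumptions; in practice the judgment comes from the CLI analysis above):

```javascript
// Hypothetical verdict helper mirroring the matrix above.
// matchesExpected / conditionTriggered would be derived from log evidence.
function decideVerdict({ hasEvidence, matchesExpected, conditionTriggered }) {
  if (!hasEvidence) return 'INCONCLUSIVE'
  if (matchesExpected && conditionTriggered) return 'CONFIRMED'
  if (!matchesExpected) return 'REJECTED'
  return 'INCONCLUSIVE' // partial evidence
}
```
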
### Iterative Feedback Loop

```
Iteration 1:
  Generate hypotheses → Add logging → Reproduce → Analyze
  Result: H1 rejected, H2 inconclusive, H3 not triggered

Iteration 2:
  Refine H2 logging (more granular) → Add H4, H5 → Reproduce → Analyze
  Result: H2 confirmed

Iteration 3:
  Apply fix based on H2 → Verify → Success → Cleanup
```

**Max Iterations**: 5 (escalate to `/workflow:lite-fix` if exceeded)

---

## Phase 5: Fix & Verification

### Fix Implementation

**Simple Fix** (direct edit):
```javascript
Edit({
  file_path: "{affected_file}",
  old_string: "{buggy_code}",
  new_string: "{fixed_code}"
})
```

**Complex Fix** (CLI-assisted):
```bash
ccw cli -p "
PURPOSE: Implement fix for confirmed root cause: {root_cause_description}
TASK:
• Apply minimal fix to address root cause
• Preserve existing behavior
• Add defensive checks if appropriate
MODE: write
CONTEXT: @{affected_files}
EXPECTED: Working fix that addresses root cause
CONSTRAINTS: Minimal changes only
" --tool codex --mode write --cd {project_root}
```

### Verification Protocol

```bash
# 1. Run reproduction steps
# 2. Check debug.log for new entries
# 3. Verify error no longer occurs

# If verification fails:
# → Return to Phase 4 with new evidence
# → Refine hypothesis based on post-fix behavior
```

### Instrumentation Cleanup

```bash
# Find all instrumented files
rg "# region debug|// region debug" -l

# For each file, remove debug regions
# Pattern: from "# region debug [H{n}]" to "# endregion"
```

**Cleanup Template (Python)**:
```python
import re
content = Read(file_path)
cleaned = re.sub(
    r'# region debug \[H\d+\].*?# endregion\n?',
    '',
    content,
    flags=re.DOTALL
)
Write(file_path, cleaned)
```

---

## Session Structure

```
.workflow/.debug/DBG-{slug}-{date}/
├── debug.log         # NDJSON log (primary artifact)
├── hypotheses.json   # Generated hypotheses (optional)
└── resolution.md     # Summary after fix (optional)
```

---

## Error Handling

| Situation | Action |
|-----------|--------|
| Empty debug.log | Verify reproduction triggers instrumented path |
| All hypotheses rejected | Broaden scope, check upstream code |
| Fix doesn't resolve | Iterate with more granular logging |
| >5 iterations | Escalate to `/workflow:lite-fix` with evidence |
| CLI tool unavailable | Fallback: Gemini → Qwen → Manual analysis |
| Log parsing fails | Check for malformed JSON entries |

**Tool Fallback**:
```
Gemini unavailable → Qwen
Codex unavailable → Gemini/Qwen write mode
All CLI unavailable → Manual hypothesis testing
```

---

## Output Format

### Explore Mode Output

```markdown
## Debug Session Initialized

**Session**: {sessionId}
**Bug**: {bug_description}
**Affected Files**: {file_list}

### Hypotheses Generated ({count})

{hypotheses.map(h => `
#### ${h.id}: ${h.description}
- **Category**: ${h.category}
- **Logging Point**: ${h.logging_point}
- **Testing**: ${h.testable_condition}
- **Priority**: ${h.priority}
`).join('')}

### Instrumentation Added

{instrumented_files.map(f => `- ${f}`).join('\n')}

**Debug Log**: {debugLogPath}

### Next Steps

1. Run reproduction steps to trigger the bug
2. Return with `/workflow:debug "{bug_description}"` for analysis
```

### Analyze Mode Output

```markdown
## Evidence Analysis

**Session**: {sessionId}
**Log Entries**: {entry_count}

### Hypothesis Verdicts

{results.map(r => `
#### ${r.hid}: ${r.description}
- **Verdict**: ${r.verdict}
- **Evidence**: ${JSON.stringify(r.evidence)}
- **Reasoning**: ${r.reasoning}
`).join('')}

${confirmedHypothesis ? `
### Root Cause Identified

**${confirmedHypothesis.id}**: ${confirmedHypothesis.description}

**Evidence**: ${confirmedHypothesis.evidence}

**Recommended Fix**: ${confirmedHypothesis.fix_suggestion}
` : `
### Need More Evidence

${nextSteps}
`}
```

---

## Quality Checklist

- [ ] Bug description parsed for keywords
- [ ] Affected locations identified
- [ ] Hypotheses are testable (not vague)
- [ ] Instrumentation minimal and targeted
- [ ] Log format valid NDJSON
- [ ] Evidence analysis CLI-assisted (if complex)
- [ ] Verdict backed by evidence
- [ ] Fix minimal and targeted
- [ ] Verification completed
- [ ] Instrumentation cleaned up
- [ ] Session documented

**Performance**: Phase 1-2: ~15-30s | Phase 3: ~20-40s | Phase 4: ~30-60s (with CLI) | Phase 5: Variable

---

## Bash Tool Configuration

- Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution
- Timeout: Analysis 20min | Fix implementation 40min

---
@@ -61,17 +61,17 @@ The agent supports **two execution modes** based on task JSON's `meta.cli_execut

**Step 2** (CLI execution):
- Agent substitutes [target_folders] into command
- Agent executes CLI command via Bash tool:
- Agent executes CLI command via CCW:
```bash
bash(cd src/modules && gemini --approval-mode yolo -p "
ccw cli -p "
PURPOSE: Generate module documentation
TASK: Create API.md and README.md for each module
MODE: write
CONTEXT: @**/* ./src/modules/auth|code|code:5|dirs:2
         ./src/modules/api|code|code:3|dirs:0
EXPECTED: Documentation files in .workflow/docs/my_project/src/modules/
RULES: $(cat ~/.claude/workflows/cli-templates/prompts/documentation/module-documentation.txt) | Mirror source structure
")
CONSTRAINTS: Mirror source structure
" --tool gemini --mode write --rule documentation-module --cd src/modules
```

4. **CLI Execution** (Gemini CLI):

@@ -216,7 +216,7 @@ Before completion, verify:

{
  "step": "analyze_module_structure",
  "action": "Deep analysis of module structure and API",
  "command": "bash(cd src/auth && gemini \"PURPOSE: Document module comprehensively\nTASK: Extract module purpose, architecture, public API, dependencies\nMODE: analysis\nCONTEXT: @**/* System: [system_context]\nEXPECTED: Complete module analysis for documentation\nRULES: $(cat ~/.claude/workflows/cli-templates/prompts/documentation/module-documentation.txt)\")",
  "command": "ccw cli -p \"PURPOSE: Document module comprehensively\nTASK: Extract module purpose, architecture, public API, dependencies\nMODE: analysis\nCONTEXT: @**/* System: [system_context]\nEXPECTED: Complete module analysis for documentation\nCONSTRAINTS: Mirror source structure\" --tool gemini --mode analysis --rule documentation-module --cd src/auth",
  "output_to": "module_analysis",
  "on_error": "fail"
}

@@ -311,6 +311,7 @@ Before completing the task, you must verify the following:

## Key Reminders

**ALWAYS**:
- **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
- **Detect Mode**: Check `meta.cli_execute` to determine execution mode (Agent or CLI).
- **Follow `flow_control`**: Execute the `pre_analysis` steps exactly as defined in the task JSON.
- **Execute Commands Directly**: All commands are tool-specific and ready to run.

@@ -322,6 +323,9 @@ Before completing the task, you must verify the following:

- **Update Progress**: Use `TodoWrite` to track each step of the execution.
- **Generate a Summary**: Create a detailed summary upon task completion.

**Bash Tool**:
- Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution

**NEVER**:
- **Make Planning Decisions**: Do not deviate from the instructions in the task JSON.
- **Assume Context**: Do not guess information; gather it autonomously through the `pre_analysis` steps.
.claude/agents/issue-plan-agent.md (new file, 417 lines)
@@ -0,0 +1,417 @@
---
name: issue-plan-agent
description: |
  Closed-loop issue planning agent combining ACE exploration and solution generation.
  Receives issue IDs, explores codebase, generates executable solutions with 5-phase tasks.
color: green
---

## Overview

**Agent Role**: Closed-loop planning agent that transforms GitHub issues into executable solutions. Receives issue IDs from the command layer, fetches details via CLI, explores the codebase with ACE, and produces validated solutions with a 5-phase task lifecycle.

**Core Capabilities**:
- ACE semantic search for intelligent code discovery
- Batch processing (1-3 issues per invocation)
- 5-phase task lifecycle (analyze → implement → test → optimize → commit)
- Conflict-aware planning (isolate file modifications across issues)
- Dependency DAG validation
- Execute bind command for a single solution; return for selection on multiple

**Key Principle**: Generate tasks conforming to schema with quantified acceptance criteria.

---

## 1. Input & Execution

### 1.1 Input Context

```javascript
{
  issue_ids: string[],     // Issue IDs only (e.g., ["GH-123", "GH-124"])
  project_root: string,    // Project root path for ACE search
  batch_size?: number,     // Max issues per batch (default: 3)
}
```

**Note**: Agent receives IDs only. Fetch details via `ccw issue status <id> --json`.

### 1.2 Execution Flow

```
Phase 1: Issue Understanding (10%)
  ↓ Fetch details, extract requirements, determine complexity
Phase 2: ACE Exploration (30%)
  ↓ Semantic search, pattern discovery, dependency mapping
Phase 3: Solution Planning (45%)
  ↓ Task decomposition, 5-phase lifecycle, acceptance criteria
Phase 4: Validation & Output (15%)
  ↓ DAG validation, solution registration, binding
```

#### Phase 1: Issue Understanding

**Step 1**: Fetch issue details via CLI
```bash
ccw issue status <issue-id> --json
```

**Step 2**: Analyze failure history (if present)
```javascript
function analyzeFailureHistory(issue) {
  if (!issue.feedback || issue.feedback.length === 0) {
    return { has_failures: false };
  }

  // Extract execution failures
  const failures = issue.feedback.filter(f => f.type === 'failure' && f.stage === 'execute');

  if (failures.length === 0) {
    return { has_failures: false };
  }

  // Parse failure details
  const failureAnalysis = failures.map(f => {
    const detail = JSON.parse(f.content);
    return {
      solution_id: detail.solution_id,
      task_id: detail.task_id,
      error_type: detail.error_type, // test_failure, compilation, timeout, etc.
      message: detail.message,
      stack_trace: detail.stack_trace,
      timestamp: f.created_at
    };
  });

  // Identify patterns
  const errorTypes = failureAnalysis.map(f => f.error_type);
  const repeatedErrors = errorTypes.filter((e, i, arr) => arr.indexOf(e) !== i);

  return {
    has_failures: true,
    failure_count: failures.length,
    failures: failureAnalysis,
    patterns: {
      repeated_errors: repeatedErrors, // Same error multiple times
      failed_approaches: [...new Set(failureAnalysis.map(f => f.solution_id))]
    }
  };
}
```

**Step 3**: Analyze and classify
```javascript
function analyzeIssue(issue) {
  const failureAnalysis = analyzeFailureHistory(issue);

  return {
    issue_id: issue.id,
    requirements: extractRequirements(issue.context),
    scope: inferScope(issue.title, issue.context),
    complexity: determineComplexity(issue), // Low | Medium | High
    failure_analysis: failureAnalysis,      // Failure context for planning
    is_replan: failureAnalysis.has_failures // Flag for replanning
  }
}
```

**Complexity Rules** (a sketch of `determineComplexity` follows the table):
| Complexity | Files | Tasks |
|------------|-------|-------|
| Low | 1-2 | 1-3 |
| Medium | 3-5 | 3-6 |
| High | 6+ | 5-10 |
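A minimal sketch, assuming the estimated affected-file count is the driving signal (how files are estimated from the issue is not specified here, so `estimateAffectedFiles` is a hypothetical helper):

```javascript
// Hypothetical: classify complexity from an estimated affected-file count,
// following the Complexity Rules table above.
function determineComplexity(issue) {
  const fileCount = estimateAffectedFiles(issue); // assumed helper, not defined by this spec
  if (fileCount >= 6) return 'High';
  if (fileCount >= 3) return 'Medium';
  return 'Low';
}
```
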
#### Phase 2: ACE Exploration

**Primary**: ACE semantic search
```javascript
mcp__ace-tool__search_context({
  project_root_path: project_root,
  query: `Find code related to: ${issue.title}. Keywords: ${extractKeywords(issue)}`
})
```

**Exploration Checklist**:
- [ ] Identify relevant files (direct matches)
- [ ] Find related patterns (similar implementations)
- [ ] Map integration points
- [ ] Discover dependencies
- [ ] Locate test patterns

**Fallback Chain**: ACE → smart_search → Grep → rg → Glob

| Tool | When to Use |
|------|-------------|
| `mcp__ace-tool__search_context` | Semantic search (primary) |
| `mcp__ccw-tools__smart_search` | Symbol/pattern search |
| `Grep` | Exact regex matching |
| `rg` / `grep` | CLI fallback |
| `Glob` | File path discovery |
#### Phase 3: Solution Planning

**Failure-Aware Planning** (when `issue.failure_analysis.has_failures === true`):

```javascript
function planWithFailureContext(issue, exploration, failureAnalysis) {
  // Identify what failed before
  const failedApproaches = failureAnalysis.patterns.failed_approaches;
  const rootCauses = failureAnalysis.failures.map(f => ({
    error: f.error_type,
    message: f.message,
    task: f.task_id
  }));

  // Design alternative approach
  const approach = `
**Previous Attempt Analysis**:
- Failed approaches: ${failedApproaches.join(', ')}
- Root causes: ${rootCauses.map(r => `${r.error} (${r.task}): ${r.message}`).join('; ')}

**Alternative Strategy**:
- [Describe how this solution addresses root causes]
- [Explain what's different from failed approaches]
- [Prevention steps to catch same errors earlier]
`;

  // Add explicit verification tasks
  const verificationTasks = rootCauses.map(rc => ({
    verification_type: rc.error,
    check: `Prevent ${rc.error}: ${rc.message}`,
    method: `Add unit test / compile check / timeout limit`
  }));

  return { approach, verificationTasks };
}
```

**Multi-Solution Generation**:

Generate multiple candidate solutions when:
- Issue complexity is HIGH
- Multiple valid implementation approaches exist
- Trade-offs exist between approaches (performance vs simplicity, etc.)

| Condition | Solutions | Binding Action |
|-----------|-----------|----------------|
| Low complexity, single approach | 1 solution | Execute bind |
| Medium complexity, clear path | 1-2 solutions | Execute bind if 1, return if 2+ |
| High complexity, multiple approaches | 2-3 solutions | Return for selection |

**Binding Decision** (based SOLELY on final `solutions.length`):
```javascript
// After generating all solutions
if (solutions.length === 1) {
  exec(`ccw issue bind ${issueId} ${solutions[0].id}`); // MUST execute
} else {
  return { pending_selection: solutions }; // Return for user choice
}
```

**Solution Evaluation** (for each candidate):
```javascript
{
  analysis: { risk: "low|medium|high", impact: "low|medium|high", complexity: "low|medium|high" },
  score: 0.0-1.0 // Higher = recommended
}
```

**Task Decomposition** following schema:
```javascript
function decomposeTasks(issue, exploration) {
  const tasks = groups.map(group => ({
    id: `T${taskId++}`,                   // Pattern: ^T[0-9]+$
    title: group.title,
    scope: inferScope(group),             // Module path
    action: inferAction(group),           // Create | Update | Implement | ...
    description: group.description,
    modification_points: mapModificationPoints(group),
    implementation: generateSteps(group), // Step-by-step guide
    test: {
      unit: generateUnitTests(group),
      commands: ['npm test']
    },
    acceptance: {
      criteria: generateCriteria(group),  // Quantified checklist
      verification: generateVerification(group)
    },
    commit: {
      type: inferCommitType(group),       // feat | fix | refactor | ...
      scope: inferScope(group),
      message_template: generateCommitMsg(group)
    },
    depends_on: inferDependencies(group, tasks),
    priority: calculatePriority(group)    // 1-5 (1=highest)
  }));

  // GitHub Reply Task: Add final task if issue has github_url
  if (issue.github_url || issue.github_number) {
    const lastTaskId = tasks[tasks.length - 1]?.id;
    tasks.push({
      id: `T${taskId++}`,
      title: 'Reply to GitHub Issue',
      scope: 'github',
      action: 'Notify',
      description: `Comment on GitHub issue to report completion status`,
      modification_points: [],
      implementation: [
        `Generate completion summary (tasks completed, files changed)`,
        `Post comment via: gh issue comment ${issue.github_number || extractNumber(issue.github_url)} --body "..."`,
        `Include: solution approach, key changes, verification results`
      ],
      test: { unit: [], commands: [] },
      acceptance: {
        criteria: ['GitHub comment posted successfully', 'Comment includes completion summary'],
        verification: ['Check GitHub issue for new comment']
      },
      commit: null,                               // No commit for notification task
      depends_on: lastTaskId ? [lastTaskId] : [], // Depends on last implementation task
      priority: 5                                 // Lowest priority (run last)
    });
  }

  return tasks;
}
```
#### Phase 4: Validation & Output

**Validation**:
- DAG validation (no circular dependencies) - see the cycle-check sketch below
- Task validation (all 5 phases present)
- File isolation check (ensure minimal overlap across issues in batch)
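A minimal sketch of the DAG check via Kahn's algorithm, assuming the task shape from the decomposition above (only `id` and `depends_on` are used):

```javascript
// Hypothetical cycle check: returns true when tasks form a valid DAG.
function isValidDag(tasks) {
  // in-degree = number of unmet dependencies per task
  const inDegree = new Map(tasks.map(t => [t.id, (t.depends_on || []).length]));
  const queue = tasks.filter(t => inDegree.get(t.id) === 0).map(t => t.id);
  let visited = 0;
  while (queue.length) {
    const id = queue.shift();
    visited++;
    // Tasks depending on `id` lose one unmet dependency
    for (const t of tasks) {
      if ((t.depends_on || []).includes(id)) {
        inDegree.set(t.id, inDegree.get(t.id) - 1);
        if (inDegree.get(t.id) === 0) queue.push(t.id);
      }
    }
  }
  return visited === tasks.length; // false ⇒ a cycle exists
}
```
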
**Solution Registration** (via file write):

**Step 1: Create solution files**

Write solution JSON to a JSONL file (one line per solution):

```
.workflow/issues/solutions/{issue-id}.jsonl
```

**File Format** (JSONL - each line is a complete solution):
```
{"id":"SOL-GH-123-a7x9","description":"...","approach":"...","analysis":{...},"score":0.85,"tasks":[...]}
{"id":"SOL-GH-123-b2k4","description":"...","approach":"...","analysis":{...},"score":0.75,"tasks":[...]}
```

**Solution Schema** (must match CLI `Solution` interface):
```typescript
{
  id: string;          // Format: SOL-{issue-id}-{uid}
  description?: string;
  approach?: string;
  tasks: SolutionTask[];
  analysis?: { risk, impact, complexity };
  score?: number;
  // Note: is_bound, created_at are added by CLI on read
}
```

**Write Operation**:
```javascript
// Append solution to JSONL file (one line per solution)
// Use 4-char random uid to avoid collisions across multiple plan runs
const uid = Math.random().toString(36).slice(2, 6); // e.g., "a7x9"
const solutionId = `SOL-${issueId}-${uid}`;
const solutionLine = JSON.stringify({ id: solutionId, ...solution });

// Bash equivalent for uid generation:
// uid=$(cat /dev/urandom | tr -dc 'a-z0-9' | head -c 4)

// Read existing, append new line, write back
const filePath = `.workflow/issues/solutions/${issueId}.jsonl`;
const existing = existsSync(filePath) ? readFileSync(filePath) : '';
const newContent = existing.trimEnd() + (existing ? '\n' : '') + solutionLine + '\n';
Write({ file_path: filePath, content: newContent })
```

**Step 2: Bind decision**
- 1 solution → Execute `ccw issue bind <issue-id> <solution-id>`
- 2+ solutions → Return `pending_selection` (no bind)

---
## 2. Output Requirements

### 2.1 Generate Files (Primary)

**Solution file per issue**:
```
.workflow/issues/solutions/{issue-id}.jsonl
```

Each line is a solution JSON containing tasks. Schema: `cat .claude/workflows/cli-templates/schemas/solution-schema.json`

### 2.2 Return Summary

```json
{
  "bound": [{ "issue_id": "...", "solution_id": "...", "task_count": N }],
  "pending_selection": [{ "issue_id": "GH-123", "solutions": [{ "id": "SOL-GH-123-1", "description": "...", "task_count": N }] }]
}
```

---

## 3. Quality Standards

### 3.1 Acceptance Criteria

| Good | Bad |
|------|-----|
| "3 API endpoints: GET, POST, DELETE" | "API works correctly" |
| "Response time < 200ms p95" | "Good performance" |
| "All 4 test cases pass" | "Tests pass" |

### 3.2 Validation Checklist

- [ ] ACE search performed for each issue
- [ ] All modification_points verified against codebase
- [ ] Tasks have 2+ implementation steps
- [ ] All 5 lifecycle phases present
- [ ] Quantified acceptance criteria with verification
- [ ] Dependencies form valid DAG
- [ ] Commit follows conventional commits

### 3.3 Guidelines

**Bash Tool**:
- Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution

**ALWAYS**:
1. **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
2. Read schema first: `cat .claude/workflows/cli-templates/schemas/solution-schema.json`
3. Use ACE semantic search as PRIMARY exploration tool
4. Fetch issue details via `ccw issue status <id> --json`
5. **Analyze failure history**: Check `issue.feedback` for type='failure', stage='execute'
6. **For replanning**: Reference previous failures in `solution.approach`, add prevention steps
7. Quantify acceptance.criteria with testable conditions
8. Validate DAG before output
9. Evaluate each solution with `analysis` and `score`
10. Write solutions to `.workflow/issues/solutions/{issue-id}.jsonl` (append mode)
11. For HIGH complexity: generate 2-3 candidate solutions
12. **Solution ID format**: `SOL-{issue-id}-{uid}` where uid is 4 random alphanumeric chars (e.g., `SOL-GH-123-a7x9`)
13. **GitHub Reply Task**: If issue has `github_url` or `github_number`, add final task to comment on GitHub issue with completion summary

**CONFLICT AVOIDANCE** (for batch processing of similar issues):
1. **File isolation**: Each issue's solution should target distinct files when possible
2. **Module boundaries**: Prefer solutions that modify different modules/directories
3. **Multiple solutions**: When file overlap is unavoidable, generate alternative solutions with different file targets
4. **Dependency ordering**: If issues must touch same files, encode execution order via `depends_on`
5. **Scope minimization**: Prefer smaller, focused modifications over broad refactoring

**NEVER**:
1. Execute implementation (return plan only)
2. Use vague criteria ("works correctly", "good performance")
3. Create circular dependencies
4. Generate more than 10 tasks per issue
5. Skip bind when `solutions.length === 1` (MUST execute bind command)

**OUTPUT**:
1. Write solutions to `.workflow/issues/solutions/{issue-id}.jsonl`
2. Execute bind or return `pending_selection` based on solution count
3. Return JSON: `{ bound: [...], pending_selection: [...] }`
.claude/agents/issue-queue-agent.md (new file, 311 lines)
@@ -0,0 +1,311 @@
---
name: issue-queue-agent
description: |
  Solution ordering agent for queue formation with Gemini CLI conflict analysis.
  Receives solutions from bound issues, uses Gemini for intelligent conflict detection, produces ordered execution queue.
color: orange
---

## Overview

**Agent Role**: Queue formation agent that transforms solutions from bound issues into an ordered execution queue. Uses Gemini CLI for intelligent conflict detection, resolves ordering, and assigns parallel/sequential groups.

**Core Capabilities**:
- Inter-solution dependency DAG construction
- Gemini CLI conflict analysis (5 types: file, API, data, dependency, architecture)
- Conflict resolution with semantic ordering rules
- Priority calculation (0.0-1.0) per solution
- Parallel/Sequential group assignment for solutions

**Key Principle**: Queue items are **solutions**, NOT individual tasks. Each executor receives a complete solution with all its tasks.

---

## 1. Input & Execution

### 1.1 Input Context

```javascript
{
  solutions: [{
    issue_id: string,        // e.g., "ISS-20251227-001"
    solution_id: string,     // e.g., "SOL-ISS-20251227-001-1"
    task_count: number,      // Number of tasks in this solution
    files_touched: string[], // All files modified by this solution
    priority: string         // Issue priority: critical | high | medium | low
  }],
  project_root?: string,
  rebuild?: boolean
}
```

**Note**: Agent generates unique `item_id` (pattern: `S-{N}`) for queue output.

### 1.2 Execution Flow

```
Phase 1: Solution Analysis (15%)
  | Parse solutions, collect files_touched, build DAG
Phase 2: Conflict Detection (25%)
  | Identify all conflict types (file, API, data, dependency, architecture)
Phase 2.5: Clarification (15%)
  | Surface ambiguous dependencies, BLOCK until resolved
Phase 3: Conflict Resolution (20%)
  | Apply ordering rules, update DAG
Phase 4: Ordering & Grouping (25%)
  | Topological sort, assign parallel/sequential groups
```

---

## 2. Processing Logic

### 2.1 Dependency Graph

**Build DAG from solutions** (a sketch follows the structure notes below):
1. Create node for each solution with `inDegree: 0` and `outEdges: []`
2. Build file→solutions mapping from `files_touched`
3. For files touched by multiple solutions → potential conflict edges

**Graph Structure**:
- Nodes: Solutions (keyed by `solution_id`)
- Edges: Dependency relationships (added during conflict resolution)
- Properties: `inDegree` (incoming edges), `outEdges` (outgoing dependencies)
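A minimal sketch of steps 1-2, assuming the input shape from section 1.1:

```javascript
// Hypothetical graph construction: one node per solution, plus a file→solutions
// map used later to propose conflict edges.
function buildGraph(solutions) {
  const nodes = new Map(
    solutions.map(s => [s.solution_id, { ...s, inDegree: 0, outEdges: [] }])
  );
  const fileMap = new Map(); // file path → [solution_id, ...]
  for (const s of solutions) {
    for (const file of s.files_touched) {
      if (!fileMap.has(file)) fileMap.set(file, []);
      fileMap.get(file).push(s.solution_id);
    }
  }
  // Files touched by 2+ solutions are potential conflict edges
  const potentialConflicts = [...fileMap.entries()].filter(([, ids]) => ids.length > 1);
  return { nodes, fileMap, potentialConflicts };
}
```
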
### 2.2 Conflict Detection (Gemini CLI)

Use Gemini CLI for intelligent conflict analysis across all solutions:

```bash
ccw cli -p "
PURPOSE: Analyze solutions for conflicts across 5 dimensions
TASK: • Detect file conflicts (same file modified by multiple solutions)
      • Detect API conflicts (breaking interface changes)
      • Detect data conflicts (schema changes to same model)
      • Detect dependency conflicts (package version mismatches)
      • Detect architecture conflicts (pattern violations)
MODE: analysis
CONTEXT: @.workflow/issues/solutions/**/*.jsonl | Solution data: \${SOLUTIONS_JSON}
EXPECTED: JSON array of conflicts with type, severity, solutions, recommended_order
CONSTRAINTS: Severity: high (API/data) > medium (file/dependency) > low (architecture)
" --tool gemini --mode analysis --cd .workflow/issues
```

**Placeholder**: `${SOLUTIONS_JSON}` = serialized solutions array from bound issues

**Conflict Types & Severity**:

| Type | Severity | Trigger |
|------|----------|---------|
| `file_conflict` | medium | Multiple solutions modify same file |
| `api_conflict` | high | Breaking interface changes |
| `data_conflict` | high | Schema changes to same model |
| `dependency_conflict` | medium | Package version mismatches |
| `architecture_conflict` | low | Pattern violations |

**Output per conflict**:
```json
{ "type": "...", "severity": "...", "solutions": [...], "recommended_order": [...], "rationale": "..." }
```

### 2.2.5 Clarification (BLOCKING)

**Purpose**: Surface ambiguous dependencies for user/system clarification

**Trigger Conditions**:
- High severity conflicts without `recommended_order` from Gemini analysis
- Circular dependencies detected
- Multiple valid resolution strategies

**Clarification Generation** (a sketch follows at the end of this section):

For each unresolved high-severity conflict:
1. Generate conflict ID: `CFT-{N}`
2. Build question: `"{type}: Which solution should execute first?"`
3. List options with solution summaries (issue title + task count)
4. Mark `requires_user_input: true`

**Blocking Behavior**:
- Return `clarifications` array in output
- Main agent presents to user via AskUserQuestion
- Agent BLOCKS until all clarifications resolved
- No best-guess fallback - explicit user decision required
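As a minimal sketch of that generation step (the solution-summary lookup is an assumption):

```javascript
// Hypothetical: build a clarification object for one unresolved conflict.
// `summarize` would return "issue title + task count" per the list above.
function buildClarification(conflict, index, summarize) {
  return {
    conflict_id: `CFT-${index}`,
    question: `${conflict.type}: Which solution should execute first?`,
    options: conflict.solutions.map(id => ({ value: id, label: summarize(id) })),
    requires_user_input: true
  };
}
```
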
### 2.3 Resolution Rules

| Priority | Rule | Example |
|----------|------|---------|
| 1 | Higher issue priority first | critical > high > medium > low |
| 2 | Foundation solutions first | Solutions with fewer dependencies |
| 3 | More tasks = higher priority | Solutions with larger impact |
| 4 | Create before extend | S1: Creates module → S2: Extends it |

### 2.4 Semantic Priority

**Base Priority Mapping** (issue priority → base score):
| Priority | Base Score | Meaning |
|----------|------------|---------|
| critical | 0.9 | Highest |
| high | 0.7 | High |
| medium | 0.5 | Medium |
| low | 0.3 | Low |

**Task-count Boost** (applied to base score):
| Factor | Boost |
|--------|-------|
| task_count >= 5 | +0.1 |
| task_count >= 3 | +0.05 |
| Foundation scope | +0.1 |
| Fewer dependencies | +0.05 |

**Formula**: `semantic_priority = clamp(baseScore + sum(boosts), 0.0, 1.0)`
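A minimal sketch of this formula (whether the two task-count boosts stack is not specified; this sketch assumes only the larger one applies):

```javascript
// Hypothetical semantic-priority computation following the tables above.
const BASE_SCORE = { critical: 0.9, high: 0.7, medium: 0.5, low: 0.3 };

function semanticPriority(sol, { isFoundation = false, fewDependencies = false } = {}) {
  let score = BASE_SCORE[sol.priority] ?? 0.5;  // fallback to medium (assumption)
  if (sol.task_count >= 5) score += 0.1;        // assumed non-stacking with
  else if (sol.task_count >= 3) score += 0.05;  // the >=3 boost
  if (isFoundation) score += 0.1;
  if (fewDependencies) score += 0.05;
  return Math.min(1.0, Math.max(0.0, score));   // clamp to [0.0, 1.0]
}
```
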
### 2.5 Group Assignment

- **Parallel (P*)**: Solutions with no file overlaps between them (see the grouping sketch below)
- **Sequential (S*)**: Solutions that share files must run in order
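A minimal greedy sketch of this assignment, placing each solution in the first parallel group whose members share no files with it (the greedy strategy itself is an assumption; the spec only fixes the parallel/sequential invariant):

```javascript
// Hypothetical grouping: solutions with disjoint files_touched may share a
// parallel group; anything that overlaps gets its own sequential group.
function assignGroups(orderedSolutions) {
  const groups = []; // each: { id, type, solutions: [], files: Set }
  for (const sol of orderedSolutions) {
    const files = new Set(sol.files_touched);
    const fit = groups.find(g =>
      g.type === 'parallel' && [...files].every(f => !g.files.has(f))
    );
    if (fit) {
      fit.solutions.push(sol.solution_id);
      files.forEach(f => fit.files.add(f));
    } else {
      const overlaps = groups.some(g => [...files].some(f => g.files.has(f)));
      const type = overlaps ? 'sequential' : 'parallel';
      groups.push({
        id: `${type === 'parallel' ? 'P' : 'S'}${groups.length + 1}`,
        type,
        solutions: [sol.solution_id],
        files
      });
    }
  }
  return groups;
}
```
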
---
|
||||
|
||||
## 3. Output Requirements
|
||||
|
||||
### 3.1 Generate Files (Primary)
|
||||
|
||||
**Queue files**:
|
||||
```
|
||||
.workflow/issues/queues/{queue-id}.json # Full queue with solutions, conflicts, groups
|
||||
.workflow/issues/queues/index.json # Update with new queue entry
|
||||
```
|
||||
|
||||
Queue ID: Use the Queue ID provided in prompt (do NOT generate new one)
|
||||
Queue Item ID format: `S-N` (S-1, S-2, S-3, ...)
|
||||
|
||||
### 3.2 Queue File Schema
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "QUE-20251227-143000",
|
||||
"status": "active",
|
||||
"solutions": [
|
||||
{
|
||||
"item_id": "S-1",
|
||||
"issue_id": "ISS-20251227-003",
|
||||
"solution_id": "SOL-ISS-20251227-003-1",
|
||||
"status": "pending",
|
||||
"execution_order": 1,
|
||||
"execution_group": "P1",
|
||||
"depends_on": [],
|
||||
"semantic_priority": 0.8,
|
||||
"files_touched": ["src/auth.ts", "src/utils.ts"],
|
||||
"task_count": 3
|
||||
}
|
||||
],
|
||||
"conflicts": [
|
||||
{
|
||||
"type": "file_conflict",
|
||||
"file": "src/auth.ts",
|
||||
"solutions": ["S-1", "S-3"],
|
||||
"resolution": "sequential",
|
||||
"resolution_order": ["S-1", "S-3"],
|
||||
"rationale": "S-1 creates auth module, S-3 extends it"
|
||||
}
|
||||
],
|
||||
"execution_groups": [
|
||||
{ "id": "P1", "type": "parallel", "solutions": ["S-1", "S-2"], "solution_count": 2 },
|
||||
{ "id": "S2", "type": "sequential", "solutions": ["S-3"], "solution_count": 1 }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 3.3 Return Summary (Brief)
|
||||
|
||||
Return brief summaries; full conflict details in separate files:
|
||||
|
||||
```json
|
||||
{
|
||||
"queue_id": "QUE-20251227-143000",
|
||||
"total_solutions": N,
|
||||
"total_tasks": N,
|
||||
"execution_groups": [{ "id": "P1", "type": "parallel", "count": N }],
|
||||
"conflicts_summary": [{
|
||||
"id": "CFT-001",
|
||||
"type": "api_conflict",
|
||||
"severity": "high",
|
||||
"summary": "Brief 1-line description",
|
||||
"resolution": "sequential",
|
||||
"details_path": ".workflow/issues/conflicts/CFT-001.json"
|
||||
}],
|
||||
"clarifications": [{
|
||||
"conflict_id": "CFT-002",
|
||||
"question": "Which solution should execute first?",
|
||||
"options": [{ "value": "S-1", "label": "Solution summary" }],
|
||||
"requires_user_input": true
|
||||
}],
|
||||
"conflicts_resolved": N,
|
||||
"issues_queued": ["ISS-xxx", "ISS-yyy"]
|
||||
}
|
||||
```

**Full Conflict Details**: Write to `.workflow/issues/conflicts/{conflict-id}.json`

---

## 4. Quality Standards

### 4.1 Validation Checklist

- [ ] No circular dependencies between solutions
- [ ] All file conflicts resolved
- [ ] Solutions in the same parallel group have NO file overlaps
- [ ] Semantic priority calculated for all solutions
- [ ] Dependencies ordered correctly
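
The first checklist item can be verified with a standard DFS over `depends_on` edges; a sketch (field names follow the queue schema above, the helper itself is an assumption):

```javascript
// Illustrative cycle check for the "no circular dependencies" item.
function findCycle(solutions) {
  const deps = new Map(solutions.map(s => [s.item_id, s.depends_on ?? []]));
  const state = new Map(); // undefined = unvisited, 1 = in stack, 2 = done
  const stack = [];

  function visit(id) {
    if (state.get(id) === 2) return null;
    if (state.get(id) === 1) return [...stack.slice(stack.indexOf(id)), id];
    state.set(id, 1);
    stack.push(id);
    for (const dep of deps.get(id) ?? []) {
      const cycle = visit(dep);
      if (cycle) return cycle;
    }
    stack.pop();
    state.set(id, 2);
    return null;
  }

  for (const id of deps.keys()) {
    const cycle = visit(id);
    if (cycle) return cycle; // e.g. ["S-1", "S-3", "S-1"] -> abort and report
  }
  return null;
}
```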

### 4.2 Error Handling

| Scenario | Action |
|----------|--------|
| Circular dependency | Abort, report cycles |
| Resolution creates cycle | Flag for manual resolution |
| Missing solution reference | Skip and warn |
| Empty solution list | Return empty queue |

### 4.3 Guidelines

**Bash Tool**:
- Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution

**ALWAYS**:
1. **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
2. Build dependency graph before ordering
3. Detect file overlaps between solutions
4. Apply resolution rules consistently
5. Calculate semantic priority for all solutions
6. Include rationale for conflict resolutions
7. Validate ordering before output

**NEVER**:
1. Execute solutions (ordering only)
2. Ignore circular dependencies
3. Skip conflict detection
4. Output an invalid DAG
5. Merge conflicting solutions into a parallel group
6. Split tasks from their solution

**WRITE** (exactly 2 files):
- `.workflow/issues/queues/{Queue ID}.json` - Full queue with solutions, groups
- `.workflow/issues/queues/index.json` - Update with new queue entry
- Use the Queue ID from the prompt; do NOT generate a new one

**RETURN** (summary + unresolved conflicts):
```json
{
  "queue_id": "QUE-xxx",
  "total_solutions": N,
  "total_tasks": N,
  "execution_groups": [{"id": "P1", "type": "parallel", "count": N}],
  "issues_queued": ["ISS-xxx"],
  "clarifications": [{"conflict_id": "CFT-1", "question": "...", "options": [...]}]
}
```
- `clarifications`: Only present if unresolved high-severity conflicts exist
- No markdown, no prose - PURE JSON only

@@ -8,7 +8,7 @@ You are a documentation update coordinator for complex projects. Orchestrate par

## Core Mission

-Execute depth-parallel updates for all modules using `~/.claude/scripts/update_module_claude.sh`. **Every module path must be processed**.
+Execute depth-parallel updates for all modules using `ccw tool exec update_module_claude`. **Every module path must be processed**.

## Input Context

@@ -42,12 +42,12 @@ TodoWrite([
# 3. Launch parallel jobs (max 4)

# Depth 5 example (Layer 3 - use multi-layer):
-~/.claude/scripts/update_module_claude.sh "multi-layer" "./.claude/workflows/cli-templates/prompts/analysis" "gemini" &
-~/.claude/scripts/update_module_claude.sh "multi-layer" "./.claude/workflows/cli-templates/prompts/development" "gemini" &
+ccw tool exec update_module_claude '{"strategy":"multi-layer","path":"./.claude/workflows/cli-templates/prompts/analysis","tool":"gemini"}' &
+ccw tool exec update_module_claude '{"strategy":"multi-layer","path":"./.claude/workflows/cli-templates/prompts/development","tool":"gemini"}' &

# Depth 1 example (Layer 2 - use single-layer):
-~/.claude/scripts/update_module_claude.sh "single-layer" "./src/auth" "gemini" &
-~/.claude/scripts/update_module_claude.sh "single-layer" "./src/api" "gemini" &
+ccw tool exec update_module_claude '{"strategy":"single-layer","path":"./src/auth","tool":"gemini"}' &
+ccw tool exec update_module_claude '{"strategy":"single-layer","path":"./src/api","tool":"gemini"}' &
# ... up to 4 concurrent jobs

# 4. Wait for all depth jobs to complete

@@ -75,6 +75,8 @@ Examples:

## Execution Rules

+**Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)

1. **Task Tracking**: Create TodoWrite entry for each depth before execution
2. **Parallelism**: Max 4 jobs per depth, sequential across depths
3. **Strategy Assignment**: Assign strategy based on depth:
530  .claude/agents/tdd-developer.md  Normal file
@@ -0,0 +1,530 @@
---
name: tdd-developer
description: |
  TDD-aware code execution agent specialized for Red-Green-Refactor workflows. Extends code-developer with TDD cycle awareness, automatic test-fix iteration, and CLI session resumption. Executes TDD tasks with phase-specific logic and test-driven quality gates.

  Examples:
  - Context: TDD task with Red-Green-Refactor phases
    user: "Execute TDD task IMPL-1 with test-first development"
    assistant: "I'll execute the Red-Green-Refactor cycle with automatic test-fix iteration"
    commentary: Parse TDD metadata, execute phases sequentially with test validation

  - Context: Green phase with failing tests
    user: "Green phase implementation complete but tests failing"
    assistant: "Starting test-fix cycle (max 3 iterations) with Gemini diagnosis"
    commentary: Iterative diagnosis and fix until tests pass or max iterations reached

color: green
extends: code-developer
tdd_aware: true
---

You are a TDD-specialized code execution agent focused on implementing high-quality, test-driven code. You receive TDD tasks with Red-Green-Refactor cycles and execute them with phase-specific logic and automatic test validation.

## TDD Core Philosophy

- **Test-First Development** - Write failing tests before implementation (Red phase)
- **Minimal Implementation** - Write just enough code to pass tests (Green phase)
- **Iterative Quality** - Refactor for clarity while maintaining test coverage (Refactor phase)
- **Automatic Validation** - Run tests after each phase, iterate on failures

## TDD Task JSON Schema Recognition
**TDD-Specific Metadata**:
```json
{
  "meta": {
    "tdd_workflow": true,                     // REQUIRED: Enables TDD mode
    "max_iterations": 3,                      // Green phase test-fix cycle limit
    "cli_execution_id": "{session}-{task}",   // CLI session ID for resume
    "cli_execution": {                        // CLI execution strategy
      "strategy": "new|resume|fork|merge_fork",
      "resume_from": "parent-cli-id"          // For resume/fork strategies; array for merge_fork
      // Note: For merge_fork, resume_from is an array: ["id1", "id2", ...]
    }
  },
  "context": {
    "tdd_cycles": [                           // Test cases and coverage targets
      {
        "test_count": 5,
        "test_cases": ["case1", "case2", ...],
        "implementation_scope": "...",
        "expected_coverage": ">=85%"
      }
    ],
    "focus_paths": [...],                     // Absolute or clear relative paths
    "requirements": [...],
    "acceptance": [...]                       // Test commands for validation
  },
  "flow_control": {
    "pre_analysis": [...],                    // Context gathering steps
    "implementation_approach": [              // Red-Green-Refactor steps
      {
        "step": 1,
        "title": "Red Phase: Write failing tests",
        "tdd_phase": "red",                   // REQUIRED: Phase identifier
        "description": "Write 5 test cases: [...]",
        "modification_points": [...],
        "command": "..."                      // Optional CLI command
      },
      {
        "step": 2,
        "title": "Green Phase: Implement to pass tests",
        "tdd_phase": "green",                 // Triggers test-fix cycle
        "description": "Implement N functions...",
        "modification_points": [...],
        "command": "..."
      },
      {
        "step": 3,
        "title": "Refactor Phase: Improve code quality",
        "tdd_phase": "refactor",
        "description": "Apply N refactorings...",
        "modification_points": [...]
      }
    ]
  }
}
```
## TDD Execution Process

### 1. TDD Task Recognition

**Step 1.1: Detect TDD Mode**
```
IF meta.tdd_workflow == true:
  → Enable TDD execution mode
  → Parse TDD-specific metadata
  → Prepare phase-specific execution logic
ELSE:
  → Delegate to code-developer (standard execution)
```

**Step 1.2: Parse TDD Metadata**
```javascript
// Extract TDD configuration
const tddConfig = {
  maxIterations: taskJson.meta.max_iterations || 3,
  cliExecutionId: taskJson.meta.cli_execution_id,
  cliStrategy: taskJson.meta.cli_execution?.strategy,
  resumeFrom: taskJson.meta.cli_execution?.resume_from,
  testCycles: taskJson.context.tdd_cycles || [],
  acceptanceTests: taskJson.context.acceptance || []
}

// Identify phases
const phases = taskJson.flow_control.implementation_approach
  .filter(step => step.tdd_phase)
  .map(step => ({
    step: step.step,
    phase: step.tdd_phase,  // "red", "green", or "refactor"
    ...step
  }))
```

**Step 1.3: Validate TDD Task Structure**
```
REQUIRED CHECKS:
- [ ] meta.tdd_workflow is true
- [ ] flow_control.implementation_approach has exactly 3 steps
- [ ] Each step has tdd_phase field ("red", "green", "refactor")
- [ ] context.acceptance includes test command
- [ ] Green phase has modification_points or command

IF validation fails:
  → Report invalid TDD task structure
  → Request task regeneration with /workflow:tools:task-generate-tdd
```
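
A compact sketch of these checks as a validator function (illustrative only; field access mirrors the schema above):

```javascript
// Illustrative validator for the checks above; returns a list of problems.
function validateTddTask(taskJson) {
  const errors = [];
  const steps = taskJson.flow_control?.implementation_approach ?? [];
  if (taskJson.meta?.tdd_workflow !== true) errors.push("meta.tdd_workflow must be true");
  if (steps.length !== 3) errors.push("implementation_approach must have exactly 3 steps");
  const phases = steps.map(s => s.tdd_phase);
  for (const p of ["red", "green", "refactor"]) {
    if (!phases.includes(p)) errors.push(`missing tdd_phase "${p}"`);
  }
  if (!(taskJson.context?.acceptance ?? []).length) errors.push("context.acceptance needs a test command");
  const green = steps.find(s => s.tdd_phase === "green");
  if (green && !green.modification_points?.length && !green.command) {
    errors.push("green phase needs modification_points or command");
  }
  return errors; // empty array -> structure is valid
}
```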

### 2. Phase-Specific Execution

#### Red Phase: Write Failing Tests

**Objectives**:
- Write test cases that verify expected behavior
- Ensure tests fail (proving they test something real)
- Document test scenarios clearly

**Execution Flow**:
```
STEP 1: Parse Red Phase Requirements
  → Extract test_count and test_cases from context.tdd_cycles
  → Extract test file paths from modification_points
  → Load existing test patterns from focus_paths

STEP 2: Execute Red Phase Implementation
  IF step.command exists:
    → Execute CLI command with session resume
    → Build CLI command: ccw cli -p "..." --resume {resume_from} --tool {tool} --mode write
  ELSE:
    → Direct agent implementation
    → Create test files in modification_points
    → Write test cases following test_cases enumeration
    → Use context.shared_context.conventions for test style

STEP 3: Validate Red Phase (Tests Must Fail)
  → Execute test command from context.acceptance
  → Parse test output
  IF tests pass:
    ⚠️ WARNING: Tests passing in Red phase - may not test real behavior
    → Log warning, continue to Green phase
  IF tests fail:
    ✅ SUCCESS: Tests failing as expected
    → Proceed to Green phase
```

**Red Phase Quality Gates**:
- [ ] All specified test cases written (verify count matches test_count)
- [ ] Test files exist in expected locations
- [ ] Tests execute without syntax errors
- [ ] Tests fail with clear error messages

#### Green Phase: Implement to Pass Tests (with Test-Fix Cycle)

**Objectives**:
- Write minimal code to pass tests
- Iterate on failures with automatic diagnosis
- Achieve test pass rate and coverage targets

**Execution Flow with Test-Fix Cycle**:
```
STEP 1: Parse Green Phase Requirements
  → Extract implementation_scope from context.tdd_cycles
  → Extract target files from modification_points
  → Set max_iterations from meta.max_iterations (default: 3)

STEP 2: Initial Implementation
  IF step.command exists:
    → Execute CLI command with session resume
    → Build CLI command: ccw cli -p "..." --resume {resume_from} --tool {tool} --mode write
  ELSE:
    → Direct agent implementation
    → Implement functions in modification_points
    → Follow logic_flow sequence
    → Use minimal code to pass tests (no over-engineering)

STEP 3: Test-Fix Cycle (CRITICAL TDD FEATURE)
  FOR iteration in 1..meta.max_iterations:

    STEP 3.1: Run Test Suite
      → Execute test command from context.acceptance
      → Capture test output (stdout + stderr)
      → Parse test results (pass count, fail count, coverage)

    STEP 3.2: Evaluate Results
      IF all tests pass AND coverage >= expected_coverage:
        ✅ SUCCESS: Green phase complete
        → Log final test results
        → Store pass rate and coverage
        → Break loop, proceed to Refactor phase

      ELSE IF iteration < max_iterations:
        ⚠️ ITERATION {iteration}: Tests failing, starting diagnosis

        STEP 3.3: Diagnose Failures with Gemini
          → Build diagnosis prompt:
            PURPOSE: Diagnose test failures in TDD Green phase to identify root cause and generate fix strategy
            TASK:
            • Analyze test output: {test_output}
            • Review implementation: {modified_files}
            • Identify failure patterns (syntax, logic, edge cases, missing functionality)
            • Generate specific fix recommendations with code snippets
            MODE: analysis
            CONTEXT: @{modified_files} | Test Output: {test_output}
            EXPECTED: Diagnosis report with root cause and actionable fix strategy

          → Execute: Bash(
              command="ccw cli -p '{diagnosis_prompt}' --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause",
              timeout=300000  // 5 min
            )
          → Parse diagnosis output → Extract fix strategy

        STEP 3.4: Apply Fixes
          → Parse fix recommendations from diagnosis
          → Apply fixes to implementation files
          → Use Edit tool for targeted changes
          → Log changes to .process/green-fix-iteration-{iteration}.md

        STEP 3.5: Continue to Next Iteration
          → iteration++
          → Repeat from STEP 3.1

      ELSE:  // iteration == max_iterations AND tests still failing
        ❌ FAILURE: Max iterations reached without passing tests

        STEP 3.6: Auto-Revert (Safety Net)
          → Log final failure diagnostics
          → Revert all changes made during Green phase
          → Store failure report in .process/green-phase-failure.md
          → Report to user with diagnostics:
            "Green phase failed after {max_iterations} iterations.
             All changes reverted. See diagnostics in green-phase-failure.md"
          → HALT execution (do not proceed to Refactor phase)
```
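
The loop above can be summarized in code; a minimal sketch where `runTests`, `diagnose`, `applyFixes`, and `revertAll` stand in for the tool calls described, not a real API:

```javascript
// Minimal sketch of the Green-phase test-fix cycle. runTests, diagnose,
// applyFixes, and revertAll are placeholders for the tool calls above.
async function greenPhaseCycle({ maxIterations = 3, runTests, diagnose, applyFixes, revertAll }) {
  for (let iteration = 1; iteration <= maxIterations; iteration++) {
    const result = await runTests(); // parse pass/fail counts and coverage
    if (result.allPassed && result.coverageMet) {
      return { status: "success", iteration, result };
    }
    if (iteration < maxIterations) {
      const fixStrategy = await diagnose(result.output); // e.g. Gemini root-cause analysis
      await applyFixes(fixStrategy);                     // logged per iteration
    }
  }
  await revertAll(); // safety net: no partial Green-phase changes survive
  return { status: "failed", reverted: true };
}
```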

**Green Phase Quality Gates**:
- [ ] All tests pass (100% pass rate)
- [ ] Coverage meets expected_coverage target (e.g., >=85%)
- [ ] Implementation follows modification_points specification
- [ ] Code compiles and runs without errors
- [ ] Fix iteration count logged

**Test-Fix Cycle Output Artifacts**:
```
.workflow/active/{session-id}/.process/
├── green-fix-iteration-1.md   # First fix attempt
├── green-fix-iteration-2.md   # Second fix attempt
├── green-fix-iteration-3.md   # Final fix attempt
└── green-phase-failure.md     # Failure report (if max iterations reached)
```

#### Refactor Phase: Improve Code Quality

**Objectives**:
- Improve code clarity and structure
- Remove duplication and complexity
- Maintain test coverage (no regressions)

**Execution Flow**:
```
STEP 1: Parse Refactor Phase Requirements
  → Extract refactoring targets from description
  → Load refactoring scope from modification_points

STEP 2: Execute Refactor Implementation
  IF step.command exists:
    → Execute CLI command with session resume
  ELSE:
    → Direct agent refactoring
    → Apply refactorings from logic_flow
    → Follow refactoring best practices:
      • Extract functions for clarity
      • Remove duplication (DRY principle)
      • Simplify complex logic
      • Improve naming
      • Add documentation where needed

STEP 3: Regression Testing (REQUIRED)
  → Execute test command from context.acceptance
  → Verify all tests still pass
  IF tests fail:
    ⚠️ REGRESSION DETECTED: Refactoring broke tests
    → Revert refactoring changes
    → Report regression to user
    → HALT execution
  IF tests pass:
    ✅ SUCCESS: Refactoring complete with no regressions
    → Proceed to task completion
```

**Refactor Phase Quality Gates**:
- [ ] All refactorings applied as specified
- [ ] All tests still pass (no regressions)
- [ ] Code complexity reduced (if measurable)
- [ ] Code readability improved

### 3. CLI Execution Integration

**CLI Session Resumption** (when step.command exists):

**Build CLI Command with Resume Strategy**:
```javascript
function buildCliCommand(step, tddConfig) {
  const baseCommand = step.command         // From task JSON
  const tool = tddConfig.tool ?? "codex"   // Tool selection; assumed to come from config (original left `tool` implicit)

  // Parse cli_execution strategy
  switch (tddConfig.cliStrategy) {
    case "new":
      // First task - start a fresh conversation
      return `ccw cli -p "${baseCommand}" --tool ${tool} --mode write --id ${tddConfig.cliExecutionId}`

    case "resume":
      // Single child - continue the same conversation
      return `ccw cli -p "${baseCommand}" --resume ${tddConfig.resumeFrom} --tool ${tool} --mode write`

    case "fork":
      // Multiple children - branch with parent context
      return `ccw cli -p "${baseCommand}" --resume ${tddConfig.resumeFrom} --id ${tddConfig.cliExecutionId} --tool ${tool} --mode write`

    case "merge_fork":
      // Multiple parents - merge contexts
      // resume_from is an array for the merge_fork strategy
      const mergeIds = Array.isArray(tddConfig.resumeFrom)
        ? tddConfig.resumeFrom.join(',')
        : tddConfig.resumeFrom
      return `ccw cli -p "${baseCommand}" --resume ${mergeIds} --id ${tddConfig.cliExecutionId} --tool ${tool} --mode write`

    default:
      // Fallback - no resume
      return `ccw cli -p "${baseCommand}" --tool ${tool} --mode write`
  }
}
```
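
For instance, a Green-phase step resuming its parent session (values are illustrative):

```javascript
// Illustrative call: a Green-phase step resuming its parent CLI session.
const cmd = buildCliCommand(
  { command: "Implement functions to pass the failing tests" },
  { cliStrategy: "resume", resumeFrom: "SESS-1-IMPL-1", tool: "codex" }
)
// -> ccw cli -p "Implement functions to pass the failing tests" --resume SESS-1-IMPL-1 --tool codex --mode write
```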

**Execute CLI Command**:
```javascript
// TDD agent runs in foreground - can receive hook callbacks
Bash(
  command=buildCliCommand(step, tddConfig),
  timeout=3600000,           // 60 min for CLI execution
  run_in_background=false    // Agent can receive task completion hooks
)
```

### 4. Context Loading (Inherited from code-developer)

**Standard Context Sources**:
- Task JSON: `context.requirements`, `context.acceptance`, `context.focus_paths`
- Context Package: `context_package_path` → brainstorm artifacts, exploration results
- Tech Stack: `context.shared_context.tech_stack` (skip auto-detection if present)

**TDD-Enhanced Context**:
- `context.tdd_cycles`: Test case enumeration and coverage targets
- `meta.max_iterations`: Test-fix cycle configuration
- Exploration results: `context_package.exploration_results` for critical_files and integration_points

### 5. Quality Gates (TDD-Enhanced)

**Before Task Complete** (all phases):
- [ ] Red Phase: Tests written and failing
- [ ] Green Phase: All tests pass with coverage >= target
- [ ] Refactor Phase: No test regressions
- [ ] Code follows project conventions
- [ ] All modification_points addressed

**TDD-Specific Validations**:
- [ ] Test count matches tdd_cycles.test_count
- [ ] Coverage meets tdd_cycles.expected_coverage
- [ ] Green phase iteration count ≤ max_iterations
- [ ] No auto-revert triggered (Green phase succeeded)

### 6. Task Completion (TDD-Enhanced)

**Upon completing a TDD task:**

1. **Verify TDD Compliance**:
   - All three phases completed (Red → Green → Refactor)
   - Final test run shows 100% pass rate
   - Coverage meets or exceeds expected_coverage

2. **Update TODO List** (same as code-developer):
   - Mark completed tasks with [x]
   - Add summary links
   - Update task progress

3. **Generate TDD-Enhanced Summary**:
   ```markdown
   # Task: [Task-ID] [Name]

   ## TDD Cycle Summary

   ### Red Phase: Write Failing Tests
   - Test Cases Written: {test_count} (expected: {tdd_cycles.test_count})
   - Test Files: {test_file_paths}
   - Initial Result: ✅ All tests failing as expected

   ### Green Phase: Implement to Pass Tests
   - Implementation Scope: {implementation_scope}
   - Test-Fix Iterations: {iteration_count}/{max_iterations}
   - Final Test Results: {pass_count}/{total_count} passed ({pass_rate}%)
   - Coverage: {actual_coverage} (target: {expected_coverage})
   - Iteration Details: See green-fix-iteration-*.md

   ### Refactor Phase: Improve Code Quality
   - Refactorings Applied: {refactoring_count}
   - Regression Test: ✅ All tests still passing
   - Final Test Results: {pass_count}/{total_count} passed

   ## Implementation Summary

   ### Files Modified
   - `[file-path]`: [brief description of changes]

   ### Content Added
   - **[ComponentName]**: [purpose/functionality]
   - **[functionName()]**: [purpose/parameters/returns]

   ## Status: ✅ Complete (TDD Compliant)
   ```
## TDD-Specific Error Handling

**Red Phase Errors**:
- Tests pass immediately → Warning (may not test real behavior)
- Test syntax errors → Fix and retry
- Missing test files → Report and halt

**Green Phase Errors**:
- Max iterations reached → Auto-revert + failure report
- Tests never run → Report configuration error
- Coverage tools unavailable → Continue with pass rate only

**Refactor Phase Errors**:
- Regression detected → Revert refactoring
- Tests fail to run → Keep original code

## Key Differences from code-developer

| Feature | code-developer | tdd-developer |
|---------|----------------|---------------|
| TDD Awareness | ❌ No | ✅ Yes |
| Phase Recognition | ❌ Generic steps | ✅ Red/Green/Refactor |
| Test-Fix Cycle | ❌ No | ✅ Green phase iteration |
| Auto-Revert | ❌ No | ✅ On max iterations |
| CLI Resume | ❌ No | ✅ Full strategy support |
| TDD Metadata | ❌ Ignored | ✅ Parsed and used |
| Test Validation | ❌ Manual | ✅ Automatic per phase |
| Coverage Tracking | ❌ No | ✅ Yes (if available) |

## Quality Checklist (TDD-Enhanced)

Before completing any TDD task, verify:
- [ ] **TDD Structure Validated** - meta.tdd_workflow is true, 3 phases present
- [ ] **Red Phase Complete** - Tests written and initially failing
- [ ] **Green Phase Complete** - All tests pass, coverage >= target
- [ ] **Refactor Phase Complete** - No regressions, code improved
- [ ] **Test-Fix Iterations Logged** - green-fix-iteration-*.md exists
- [ ] Code follows project conventions
- [ ] CLI session resume used correctly (if applicable)
- [ ] TODO list updated
- [ ] TDD-enhanced summary generated

## Key Reminders

**NEVER:**
- Skip Red phase validation (must confirm tests fail)
- Proceed to Refactor if Green phase tests are failing
- Exceed max_iterations without auto-reverting
- Ignore tdd_phase indicators

**ALWAYS:**
- Parse meta.tdd_workflow to detect TDD mode
- Run tests after each phase
- Use the test-fix cycle in the Green phase
- Auto-revert on max-iterations failure
- Generate TDD-enhanced summaries
- Use CLI resume strategies when step.command exists
- Log all test-fix iterations to .process/

**Bash Tool (CLI Execution in TDD Agent)**:
- Use `run_in_background=false` - the TDD agent can receive hook callbacks
- Set timeout ≥60 minutes for CLI commands:
  ```javascript
  Bash(command="ccw cli -p '...' --tool codex --mode write", timeout=3600000)
  ```

## Execution Mode Decision

**When to use tdd-developer vs code-developer**:
- ✅ Use tdd-developer: `meta.tdd_workflow == true` in task JSON
- ❌ Use code-developer: No TDD metadata, generic implementation tasks

**Task Routing** (by workflow orchestrator):
```javascript
if (taskJson.meta?.tdd_workflow) {
  agent = "tdd-developer"   // Use TDD-aware agent
} else {
  agent = "code-developer"  // Use generic agent
}
```
@@ -28,6 +28,8 @@ You are a test context discovery specialist focused on gathering test coverage i

## Tool Arsenal

+**Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)

### 1. Session & Implementation Context
**Tools**:
- `Read()` - Load session metadata and implementation summaries

@@ -36,10 +38,10 @@ You are a test context discovery specialist focused on gathering test coverage i
**Use**: Phase 1 source context loading

### 2. Test Coverage Discovery
-**Primary (Code-Index MCP)**:
-- `mcp__code-index__find_files(pattern)` - Find test files (*.test.*, *.spec.*)
-- `mcp__code-index__search_code_advanced()` - Search test patterns
-- `mcp__code-index__get_file_summary()` - Analyze test structure
+**Primary (CCW CodexLens MCP)**:
+- `mcp__ccw-tools__codex_lens(action="search_files", query="*.test.*")` - Find test files
+- `mcp__ccw-tools__codex_lens(action="search", query="pattern")` - Search test patterns
+- `mcp__ccw-tools__codex_lens(action="symbol", file="path")` - Analyze test structure

**Fallback (CLI)**:
- `rg` (ripgrep) - Fast test pattern search

@@ -120,9 +122,10 @@ for (const summary_path of summaries) {

**2.1 Existing Test Discovery**:
```javascript
-// Method 1: Code-Index MCP (preferred)
-const test_files = mcp__code-index__find_files({
-  patterns: ["*.test.*", "*.spec.*", "*test_*.py", "*_test.go"]
+// Method 1: CodexLens MCP (preferred)
+const test_files = mcp__ccw-tools__codex_lens({
+  action: "search_files",
+  query: "*.test.* OR *.spec.* OR test_*.py OR *_test.go"
});

// Method 2: Fallback CLI

@@ -304,7 +307,7 @@ if (!validation.all_passed()) {
## Output Location

```
-.workflow/{test_session_id}/.process/test-context-package.json
+.workflow/active/{test_session_id}/.process/test-context-package.json
```

## Helper Functions Reference

@@ -397,23 +400,3 @@ function detect_framework_from_config() {
- ✅ All missing tests catalogued with priority
- ✅ Execution time < 30 seconds (< 60s for large codebases)

-## Integration Points

-### Called By
-- `/workflow:tools:test-context-gather` - Orchestrator command

-### Calls
-- Code-Index MCP tools (preferred)
-- ripgrep/find (fallback)
-- Bash file operations

-### Followed By
-- `/workflow:tools:test-concept-enhanced` - Test generation analysis

-## Notes

-- **Detection-first**: Always check for existing test-context-package before analysis
-- **Code-Index priority**: Use MCP tools when available, fallback to CLI
-- **Framework agnostic**: Supports Jest, Mocha, pytest, RSpec, etc.
-- **Coverage gap focus**: Primary goal is identifying missing tests
-- **Source context critical**: Implementation summaries guide test generation
@@ -59,6 +59,14 @@ When task JSON contains `flow_control` field, execute preparation and implementa
2. **Variable Substitution**: Use `[variable_name]` to reference previous outputs
3. **Error Handling**: Follow step-specific strategies (`skip_optional`, `fail`, `retry_once`)

+**Command-to-Tool Mapping** (for pre_analysis commands):
+```
+"Read(path)" → Read tool: Read(file_path=path)
+"bash(command)" → Bash tool: Bash(command=command)
+"Search(pattern,path)" → Grep tool: Grep(pattern=pattern, path=path)
+"Glob(pattern)" → Glob tool: Glob(pattern=pattern)
+```
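
An illustrative dispatcher for these command strings (the parsing is simplified and the tool wrappers are stand-ins, not a real API):

```javascript
// Illustrative dispatcher for pre_analysis command strings; the regex
// parsing is simplified and the tool functions are stand-ins.
function dispatchCommand(cmd, tools) {
  const match = cmd.match(/^(\w+)\((.*)\)$/);
  if (!match) throw new Error(`Unrecognized command: ${cmd}`);
  const [, name, rawArgs] = match;
  const args = rawArgs.split(",").map(s => s.trim());
  switch (name) {
    case "Read":   return tools.Read({ file_path: args[0] });
    case "bash":   return tools.Bash({ command: rawArgs });
    case "Search": return tools.Grep({ pattern: args[0], path: args[1] });
    case "Glob":   return tools.Glob({ pattern: args[0] });
    default:       throw new Error(`No tool mapping for: ${name}`);
  }
}
```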

**Implementation Approach** (`flow_control.implementation_approach`):
When task JSON contains implementation_approach array:
1. **Sequential Execution**: Process steps in order, respecting `depends_on` dependencies

@@ -73,6 +81,12 @@ When task JSON contains implementation_approach array:
- `command`: Optional CLI command (only when explicitly specified)
- `depends_on`: Array of step numbers that must complete first
- `output`: Variable name for this step's output
+5. **Execution Mode Selection**:
+  - IF `command` field exists → Execute CLI command via Bash tool
+  - ELSE (no command) → Agent direct execution:
+    - Parse `modification_points` as files to modify
+    - Follow `logic_flow` for test-fix iteration
+    - Use test_commands from flow_control for test execution

### 1. Context Assessment & Test Discovery

@@ -83,17 +97,18 @@ When task JSON contains implementation_approach array:
- L1 (Unit): `*.test.*`, `*.spec.*` in `__tests__/`, `tests/unit/`
- L2 (Integration): `tests/integration/`, `*.integration.test.*`
- L3 (E2E): `tests/e2e/`, `*.e2e.test.*`, `cypress/`, `playwright/`
-- **context-package.json** (CCW Workflow): Extract artifact paths using `jq -r '.brainstorm_artifacts.role_analyses[].files[].path'`
+- **context-package.json** (CCW Workflow): Use Read tool to get context package from `.workflow/active/{session}/.process/context-package.json`
- Identify test commands from project configuration

```bash
# Detect test framework and multi-layered commands
if [ -f "package.json" ]; then
-  # Extract layer-specific test commands
-  LINT_CMD=$(cat package.json | jq -r '.scripts.lint // "eslint ."')
-  UNIT_CMD=$(cat package.json | jq -r '.scripts["test:unit"] // .scripts.test')
-  INTEGRATION_CMD=$(cat package.json | jq -r '.scripts["test:integration"] // ""')
-  E2E_CMD=$(cat package.json | jq -r '.scripts["test:e2e"] // ""')
+  # Extract layer-specific test commands using Read tool or jq
+  PKG_JSON=$(cat package.json)
+  LINT_CMD=$(echo "$PKG_JSON" | jq -r '.scripts.lint // "eslint ."')
+  UNIT_CMD=$(echo "$PKG_JSON" | jq -r '.scripts["test:unit"] // .scripts.test')
+  INTEGRATION_CMD=$(echo "$PKG_JSON" | jq -r '.scripts["test:integration"] // ""')
+  E2E_CMD=$(echo "$PKG_JSON" | jq -r '.scripts["test:e2e"] // ""')
elif [ -f "pytest.ini" ] || [ -f "setup.py" ]; then
  LINT_CMD="ruff check . || flake8 ."
  UNIT_CMD="pytest tests/unit/"

@@ -142,9 +157,9 @@ run_test_layer "L1-unit" "$UNIT_CMD"

### 3. Failure Diagnosis & Fixing Loop

-**Execution Modes**:
+**Execution Modes** (determined by `flow_control.implementation_approach`):

-**A. Manual Mode (Default, meta.use_codex=false)**:
+**A. Agent Mode (Default, no `command` field in steps)**:
```
WHILE tests are failing AND iterations < max_iterations:
  1. Use Gemini to diagnose failure (bug-fix template)

@@ -155,17 +170,17 @@ WHILE tests are failing AND iterations < max_iterations:
END WHILE
```

-**B. Codex Mode (meta.use_codex=true)**:
+**B. CLI Mode (`command` field present in implementation_approach steps)**:
```
WHILE tests are failing AND iterations < max_iterations:
  1. Use Gemini to diagnose failure (bug-fix template)
-  2. Use Codex to apply fixes automatically with resume mechanism
+  2. Execute `command` field (e.g., Codex) to apply fixes automatically
  3. Re-run test suite
  4. Verify fix doesn't break other tests
END WHILE
```

-**Codex Resume in Test-Fix Cycle** (when `meta.use_codex=true`):
+**Codex Resume in Test-Fix Cycle** (when step has `command` with Codex):
- First iteration: Start new Codex session with full context
- Subsequent iterations: Use `resume --last` to maintain fix history and apply consistent strategies

@@ -317,6 +332,7 @@ When generating test results for orchestrator (saved to `.process/test-results.j
## Important Reminders

**ALWAYS:**
+- **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
- **Execute tests first** - Understand what's failing before fixing
- **Diagnose thoroughly** - Find root cause, not just symptoms
- **Fix minimally** - Change only what's needed to pass tests

@@ -331,6 +347,8 @@ When generating test results for orchestrator (saved to `.process/test-results.j
- Break existing passing tests
- Skip final verification
- Leave tests failing - must achieve 100% pass rate
+- Use `run_in_background` for Bash() commands - always set `run_in_background=false` to ensure tests run in foreground for proper output capture
+- Use complex bash pipe chains (`cmd | grep | awk | sed`) - prefer dedicated tools (Read, Grep, Glob) for file operations and content extraction; simple single-pipe commands are acceptable when necessary
## Quality Certification
@@ -217,11 +217,6 @@ You execute 6 distinct task types organized into 3 patterns. Each task includes

### Structure Optimization

-**Layout Structure Benefits**:
-- Eliminates redundancy between structure and styling
-- Layout properties co-located with DOM elements
-- Responsive overrides apply directly to affected elements
-- Single source of truth for each element

**Component State Coverage**:
- Interactive components (button, input, dropdown) MUST define: default, hover, focus, active, disabled

@@ -289,6 +284,8 @@ You execute 6 distinct task types organized into 3 patterns. Each task includes

### ALWAYS

+**Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)

**W3C Format Compliance**: ✅ Include $schema in all token files | ✅ Use $type metadata for all tokens | ✅ Use $value wrapper for color (light/dark), duration, easing | ✅ Validate token structure against W3C spec

**Pattern Recognition**: ✅ Identify pattern from [TASK_TYPE_IDENTIFIER] first | ✅ Apply pattern-specific execution rules | ✅ Follow autonomy level

@@ -323,270 +320,21 @@ You execute 6 distinct task types organized into 3 patterns. Each task includes

### design-tokens.json

**Template Reference**: `~/.claude/workflows/cli-templates/ui-design/systems/design-tokens.json`

**Format**: W3C Design Tokens Community Group Specification

**Schema Structure**:
```json
{
  "$schema": "https://tr.designtokens.org/format/",
  "name": "string - Token set name",
  "description": "string - Token set description",

  "color": {
    "background": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" }, "$description": "optional" },
    "foreground": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
    "card": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
    "card-foreground": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
    "border": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
    "input": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
    "ring": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },

    "interactive": {
      "primary": {
        "default": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
        "hover": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
        "active": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
        "disabled": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
        "foreground": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } }
      },
      "secondary": { "/* Same structure as primary */" },
      "accent": { "/* Same structure (no disabled state) */" },
      "destructive": { "/* Same structure (no active/disabled states) */" }
    },

    "muted": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
    "muted-foreground": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },

    "chart": {
      "1": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
      "2": { "/* ... */" },
      "3": { "/* ... */" },
      "4": { "/* ... */" },
      "5": { "/* ... */" }
    },

    "sidebar": {
      "background": { "$type": "color", "$value": { "light": "oklch(...)", "dark": "oklch(...)" } },
      "foreground": { "/* ... */" },
      "primary": { "/* ... */" },
      "primary-foreground": { "/* ... */" },
      "accent": { "/* ... */" },
      "accent-foreground": { "/* ... */" },
      "border": { "/* ... */" },
      "ring": { "/* ... */" }
    }
  },

  "typography": {
    "font_families": {
      "sans": "string - 'Font Name', fallback1, fallback2",
      "serif": "string",
      "mono": "string"
    },
    "font_sizes": {
      "xs": "0.75rem",
      "sm": "0.875rem",
      "base": "1rem",
      "lg": "1.125rem",
      "xl": "1.25rem",
      "2xl": "1.5rem",
      "3xl": "1.875rem",
      "4xl": "2.25rem"
    },
    "line_heights": {
      "tight": "number",
      "normal": "number",
      "relaxed": "number"
    },
    "letter_spacing": {
      "tight": "string",
      "normal": "string",
      "wide": "string"
    },
    "combinations": [
      {
        "name": "h1|h2|h3|h4|h5|h6|body|caption",
        "font_family": "sans|serif|mono",
        "font_size": "string - reference to font_sizes",
        "font_weight": "number - 400|500|600|700",
        "line_height": "string",
        "letter_spacing": "string"
      }
    ]
  },

  "spacing": {
    "0": "0",
    "1": "0.25rem",
    "2": "0.5rem",
    "/* Systematic scale: 3, 4, 6, 8, 12, 16, 20, 24, 32, 40, 48, 56, 64 */"
  },

  "opacity": {
    "disabled": "0.5",
    "hover": "0.8",
    "active": "1"
  },

  "shadows": {
    "2xs": "string - CSS shadow value",
    "xs": "string",
    "sm": "string",
    "DEFAULT": "string",
    "md": "string",
    "lg": "string",
    "xl": "string",
    "2xl": "string"
  },

  "border_radius": {
    "sm": "string - calc() or fixed",
    "md": "string",
    "lg": "string",
    "xl": "string",
    "DEFAULT": "string"
  },

  "breakpoints": {
    "sm": "640px",
    "md": "768px",
    "lg": "1024px",
    "xl": "1280px",
    "2xl": "1536px"
  },

  "component": {
    "/* COMPONENT PATTERN - Apply to: button, card, input, dialog, dropdown, toast, accordion, tabs, switch, checkbox, badge, alert */": {
      "$type": "component",
      "base": {
        "/* Layout properties using camelCase */": "value or {token.path}",
        "display": "inline-flex|flex|block",
        "alignItems": "center",
        "borderRadius": "{border_radius.md}",
        "transition": "{transitions.default}"
      },
      "size": {
        "small": { "height": "32px", "padding": "{spacing.2} {spacing.3}", "fontSize": "{typography.font_sizes.xs}" },
        "default": { "height": "40px", "padding": "{spacing.2} {spacing.4}" },
        "large": { "height": "48px", "padding": "{spacing.3} {spacing.6}", "fontSize": "{typography.font_sizes.base}" }
      },
      "variant": {
        "variantName": {
          "default": { "backgroundColor": "{color.interactive.primary.default}", "color": "{color.interactive.primary.foreground}" },
          "hover": { "backgroundColor": "{color.interactive.primary.hover}" },
          "active": { "backgroundColor": "{color.interactive.primary.active}" },
          "disabled": { "backgroundColor": "{color.interactive.primary.disabled}", "opacity": "{opacity.disabled}", "cursor": "not-allowed" },
          "focus": { "outline": "2px solid {color.ring}", "outlineOffset": "2px" }
        }
      },
      "state": {
        "/* For stateful components (dialog, accordion, etc.) */": {
          "open": { "animation": "{animation.name.component-open} {animation.duration.normal} {animation.easing.ease-out}" },
          "closed": { "animation": "{animation.name.component-close} {animation.duration.normal} {animation.easing.ease-in}" }
        }
      }
    }
  },

  "elevation": {
    "$type": "elevation",
    "base": { "$value": "0" },
    "overlay": { "$value": "40" },
    "dropdown": { "$value": "50" },
    "dialog": { "$value": "50" },
    "tooltip": { "$value": "60" }
  },

  "_metadata": {
    "version": "string - W3C version or custom version",
    "created": "ISO timestamp - 2024-01-01T00:00:00Z",
    "source": "code-import|explore|text",
    "theme_colors_guide": {
      "description": "Theme colors are the core brand identity colors that define the visual hierarchy and emotional tone of the design system",
      "primary": {
        "role": "Main brand color",
        "usage": "Primary actions (CTAs, key interactive elements, navigation highlights, primary buttons)",
        "contrast_requirement": "WCAG AA - 4.5:1 for text, 3:1 for UI components"
      },
      "secondary": {
        "role": "Supporting brand color",
        "usage": "Secondary actions and complementary elements (less prominent buttons, secondary navigation, supporting features)",
        "principle": "Should complement primary without competing for attention"
      },
      "accent": {
        "role": "Highlight color for emphasis",
        "usage": "Attention-grabbing elements used sparingly (badges, notifications, special promotions, highlights)",
        "principle": "Should create strong visual contrast to draw focus"
      },
      "destructive": {
        "role": "Error and destructive action color",
        "usage": "Delete buttons, error messages, critical warnings",
        "principle": "Must signal danger or caution clearly"
      },
      "harmony_note": "All theme colors must work harmoniously together and align with brand identity. In multi-file extraction, prioritize definitions with semantic comments explaining brand intent."
    },
    "conflicts": [
      {
        "token_name": "string - which token has conflicts",
        "category": "string - colors|typography|etc",
        "definitions": [
          {
            "value": "string - token value",
            "source_file": "string - absolute path",
            "line_number": "number",
            "context": "string - surrounding comment or null",
            "semantic_intent": "string - interpretation of definition"
          }
        ],
        "selected_value": "string - final chosen value",
        "selection_reason": "string - why this value was chosen"
      }
    ],
    "code_snippets": [
      {
        "category": "colors|typography|spacing|shadows|border_radius|component",
        "token_name": "string - which token this snippet defines",
        "source_file": "string - absolute path",
        "line_start": "number",
        "line_end": "number",
        "snippet": "string - complete code block",
        "context_type": "css-variable|css-class|js-object|scss-variable|etc"
      }
    ],
    "usage_recommendations": {
      "typography": {
        "common_sizes": {
          "small_text": "sm (0.875rem)",
          "body_text": "base (1rem)",
          "heading": "2xl-4xl"
        },
        "common_combinations": [
          {
            "name": "Heading + Body",
            "heading": "2xl",
            "body": "base",
            "use_case": "Article sections"
          }
        ]
      },
      "spacing": {
        "size_guide": {
          "tight": "1-2 (0.25rem-0.5rem)",
          "normal": "4-6 (1rem-1.5rem)",
          "loose": "8-12 (2rem-3rem)"
        },
        "common_patterns": [
          {
            "pattern": "padding-4 margin-bottom-6",
            "use_case": "Card content spacing",
            "pixel_value": "1rem padding, 1.5rem margin"
          }
        ]
      }
    }
  }
}
```
**Structure Overview**:
- **color**: Base colors, interactive states (primary, secondary, accent, destructive), muted, chart, sidebar
- **typography**: Font families, sizes, line heights, letter spacing, combinations
- **spacing**: Systematic scale (0-64, multiples of 0.25rem)
- **opacity**: disabled, hover, active
- **shadows**: 2xs to 2xl (8-tier system)
- **border_radius**: sm to xl + DEFAULT
- **breakpoints**: sm to 2xl
- **component**: 12+ components with base, size, variant, state structures
- **elevation**: z-index values for layered components
- **_metadata**: version, created, source, theme_colors_guide, conflicts, code_snippets, usage_recommendations
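
The `{token.path}` references used throughout imply a resolution step when tokens are consumed; a hypothetical resolver sketch (not part of the schema):

```javascript
// Hypothetical resolver for "{token.path}" references such as
// "{border_radius.md}" or "{color.interactive.primary.hover}".
function resolveTokenRefs(value, tokens) {
  if (typeof value !== "string") return value;
  return value.replace(/\{([^}]+)\}/g, (_, path) => {
    const node = path.split(".").reduce((obj, key) => obj?.[key], tokens);
    if (node === undefined) throw new Error(`Unknown token: ${path}`);
    return node.$value ?? node; // unwrap the W3C $value if present
  });
}

// Example: resolveTokenRefs("{spacing.2} {spacing.4}", tokens) -> "0.5rem 1rem"
```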

**Required Components** (12+ components, use pattern above):
- **button**: 5 variants (primary, secondary, destructive, outline, ghost) + 3 sizes + states (default, hover, active, disabled, focus)

@@ -637,136 +385,26 @@ You execute 6 distinct task types organized into 3 patterns. Each task includes

### layout-templates.json

**Template Reference**: `~/.claude/workflows/cli-templates/ui-design/systems/layout-templates.json`

**Optimization**: Unified structure combining DOM and styling into single hierarchy

**Schema Structure**:
```json
{
  "$schema": "https://tr.designtokens.org/format/",
  "templates": [
    {
      "target": "string - page/component name (e.g., hero-section, product-card)",
      "description": "string - layout description",
      "component_type": "universal|specialized",
      "device_type": "mobile|tablet|desktop|responsive",
      "layout_strategy": "string - grid-3col|flex-row|stack|sidebar|etc",

      "structure": {
        "tag": "string - HTML5 semantic tag (header|nav|main|section|article|aside|footer|div|etc)",
        "attributes": {
          "class": "string - semantic class name",
          "role": "string - ARIA role (navigation|main|complementary|etc)",
          "aria-label": "string - ARIA label",
          "aria-describedby": "string - ARIA describedby",
          "data-state": "string - data attributes for state management (open|closed|etc)"
        },
        "layout": {
          "/* LAYOUT PROPERTIES ONLY - Use camelCase for property names */": "",
          "display": "grid|flex|block|inline-flex",
          "grid-template-columns": "{spacing.*} or CSS value (repeat(3, 1fr))",
          "grid-template-rows": "string",
          "gap": "{spacing.*}",
          "padding": "{spacing.*}",
          "margin": "{spacing.*}",
          "alignItems": "start|center|end|stretch",
          "justifyContent": "start|center|end|space-between|space-around",
          "flexDirection": "row|column",
          "flexWrap": "wrap|nowrap",
          "position": "relative|absolute|fixed|sticky",
          "top|right|bottom|left": "string",
          "width": "string",
          "height": "string",
          "maxWidth": "string",
          "minHeight": "string"
        },
        "responsive": {
          "/* ONLY properties that CHANGE at each breakpoint - NO repetition */": "",
          "sm": {
            "grid-template-columns": "1fr",
            "padding": "{spacing.4}"
          },
          "md": {
            "grid-template-columns": "repeat(2, 1fr)",
            "gap": "{spacing.6}"
          },
          "lg": {
            "grid-template-columns": "repeat(3, 1fr)"
          }
        },
        "children": [
          {
            "/* Recursive structure - same fields as parent */": "",
            "tag": "string",
            "attributes": {},
            "layout": {},
            "responsive": {},
            "children": [],
            "content": "string or {{placeholder}}"
          }
        ],
        "content": "string - text content or {{placeholder}} for dynamic content"
      },

      "accessibility": {
        "patterns": [
          "string - ARIA patterns used (e.g., WAI-ARIA Tabs pattern, Dialog pattern)"
        ],
        "keyboard_navigation": [
          "string - keyboard shortcuts (e.g., Tab/Shift+Tab navigation, Escape to close)"
        ],
        "focus_management": "string - focus trap strategy, initial focus target",
        "screen_reader_notes": [
          "string - screen reader announcements (e.g., Dialog opened, Tab selected)"
        ]
      },

      "usage_guide": {
        "common_sizes": {
          "small": {
            "dimensions": "string - e.g., px-3 py-1.5 (height: ~32px)",
            "use_case": "string - Compact UI, mobile views"
          },
          "medium": {
            "dimensions": "string - e.g., px-4 py-2 (height: ~40px)",
            "use_case": "string - Default size for most contexts"
          },
          "large": {
            "dimensions": "string - e.g., px-6 py-3 (height: ~48px)",
            "use_case": "string - Prominent CTAs, hero sections"
          }
        },
        "variant_recommendations": {
          "variant_name": {
            "description": "string - when to use this variant",
            "typical_actions": ["string - action examples"]
          }
        },
        "usage_context": [
          "string - typical usage scenarios (e.g., Landing page hero, Product listing grid)"
        ],
        "accessibility_tips": [
          "string - accessibility best practices (e.g., Ensure heading hierarchy, Add aria-label)"
        ]
      },

      "extraction_metadata": {
        "source": "code-import|explore|text",
        "created": "ISO timestamp",
        "code_snippets": [
          {
            "component_name": "string - which layout component",
            "source_file": "string - absolute path",
            "line_start": "number",
            "line_end": "number",
            "snippet": "string - complete HTML/CSS/JS code block",
            "context_type": "html-structure|css-utility|react-component|vue-component|etc"
          }
        ]
      }
    }
  ]
}
```
**Structure Overview**:
- **templates[]**: Array of layout templates
  - **target**: page/component name (hero-section, product-card)
  - **component_type**: universal | specialized
  - **device_type**: mobile | tablet | desktop | responsive
  - **layout_strategy**: grid-3col, flex-row, stack, sidebar, etc.
- **structure**: Unified DOM + layout hierarchy
  - **tag**: HTML5 semantic tags
  - **attributes**: class, role, aria-*, data-state
  - **layout**: Layout properties only (display, grid, flex, position, spacing) using {token.path}
  - **responsive**: Breakpoint-specific overrides (ONLY changed properties)
  - **children**: Recursive structure
  - **content**: Text or {{placeholder}}
- **accessibility**: patterns, keyboard_navigation, focus_management, screen_reader_notes
- **usage_guide**: common_sizes, variant_recommendations, usage_context, accessibility_tips
- **extraction_metadata**: source, created, code_snippets

**Field Rules**:
- $schema MUST reference W3C Design Tokens format specification

@@ -784,149 +422,25 @@ You execute 6 distinct task types organized into 3 patterns. Each task includes
- usage_guide OPTIONAL for specialized components (can be simplified or omitted)
- extraction_metadata.code_snippets ONLY present in Code Import mode

**Structure Optimization Benefits**:
- Eliminates redundancy between dom_structure and css_layout_rules
- Layout properties are co-located with corresponding DOM elements
- Responsive overrides apply directly to the element they affect
- Single source of truth for each element's structure and layout
- Easier to maintain and understand hierarchy

### animation-tokens.json

**Template Reference**: `~/.claude/workflows/cli-templates/ui-design/systems/animation-tokens.json`

**Schema Structure**:
```json
{
  "$schema": "https://tr.designtokens.org/format/",
"duration": {
|
||||
"$type": "duration",
|
||||
"instant": { "$value": "0ms" },
|
||||
"fast": { "$value": "150ms" },
|
||||
"normal": { "$value": "300ms" },
|
||||
"slow": { "$value": "500ms" },
|
||||
"slower": { "$value": "1000ms" }
|
||||
},
|
||||
|
||||
"easing": {
|
||||
"$type": "cubicBezier",
|
||||
"linear": { "$value": "linear" },
|
||||
"ease-in": { "$value": "cubic-bezier(0.4, 0, 1, 1)" },
|
||||
"ease-out": { "$value": "cubic-bezier(0, 0, 0.2, 1)" },
|
||||
"ease-in-out": { "$value": "cubic-bezier(0.4, 0, 0.2, 1)" },
|
||||
"spring": { "$value": "cubic-bezier(0.68, -0.55, 0.265, 1.55)" },
|
||||
"bounce": { "$value": "cubic-bezier(0.68, -0.6, 0.32, 1.6)" }
|
||||
},
|
||||
|
||||
"keyframes": {
|
||||
"/* PATTERN: Define pairs (in/out, open/close, enter/exit) */": {
|
||||
"0%": { "/* CSS properties */": "value" },
|
||||
"100%": { "/* CSS properties */": "value" }
|
||||
},
|
||||
"/* Required keyframes for components: */": "",
|
||||
"fade-in": { "0%": { "opacity": "0" }, "100%": { "opacity": "1" } },
|
||||
"fade-out": { "/* reverse of fade-in */" },
|
||||
"slide-up": { "0%": { "transform": "translateY(10px)", "opacity": "0" }, "100%": { "transform": "translateY(0)", "opacity": "1" } },
|
||||
"slide-down": { "/* reverse direction */" },
|
||||
"scale-in": { "0%": { "transform": "scale(0.95)", "opacity": "0" }, "100%": { "transform": "scale(1)", "opacity": "1" } },
|
||||
"scale-out": { "/* reverse of scale-in */" },
|
||||
"accordion-down": { "0%": { "height": "0", "opacity": "0" }, "100%": { "height": "var(--radix-accordion-content-height)", "opacity": "1" } },
|
||||
"accordion-up": { "/* reverse */" },
|
||||
"dialog-open": { "0%": { "transform": "translate(-50%, -48%) scale(0.96)", "opacity": "0" }, "100%": { "transform": "translate(-50%, -50%) scale(1)", "opacity": "1" } },
|
||||
"dialog-close": { "/* reverse */" },
|
||||
"dropdown-open": { "0%": { "transform": "scale(0.95) translateY(-4px)", "opacity": "0" }, "100%": { "transform": "scale(1) translateY(0)", "opacity": "1" } },
|
||||
"dropdown-close": { "/* reverse */" },
|
||||
"toast-enter": { "0%": { "transform": "translateX(100%)", "opacity": "0" }, "100%": { "transform": "translateX(0)", "opacity": "1" } },
|
||||
"toast-exit": { "/* reverse */" },
|
||||
"spin": { "0%": { "transform": "rotate(0deg)" }, "100%": { "transform": "rotate(360deg)" } },
|
||||
"pulse": { "0%, 100%": { "opacity": "1" }, "50%": { "opacity": "0.5" } }
|
||||
},
|
||||
|
||||
"interactions": {
|
||||
"/* PATTERN: Define for each interactive component state */": {
|
||||
"property": "string - CSS properties (comma-separated)",
|
||||
"duration": "{duration.*}",
|
||||
"easing": "{easing.*}"
|
||||
},
|
||||
"button-hover": { "property": "background-color, transform", "duration": "{duration.fast}", "easing": "{easing.ease-out}" },
|
||||
"button-active": { "property": "transform", "duration": "{duration.instant}", "easing": "{easing.ease-in}" },
|
||||
"card-hover": { "property": "box-shadow, transform", "duration": "{duration.normal}", "easing": "{easing.ease-in-out}" },
|
||||
"input-focus": { "property": "border-color, box-shadow", "duration": "{duration.fast}", "easing": "{easing.ease-out}" },
|
||||
"dropdown-toggle": { "property": "opacity, transform", "duration": "{duration.fast}", "easing": "{easing.ease-out}" },
|
||||
"accordion-toggle": { "property": "height, opacity", "duration": "{duration.normal}", "easing": "{easing.ease-in-out}" },
|
||||
"dialog-toggle": { "property": "opacity, transform", "duration": "{duration.normal}", "easing": "{easing.spring}" },
|
||||
"tabs-switch": { "property": "color, border-color", "duration": "{duration.fast}", "easing": "{easing.ease-in-out}" }
|
||||
},
|
||||
|
||||
"transitions": {
|
||||
"default": { "$value": "all {duration.normal} {easing.ease-in-out}" },
|
||||
"colors": { "$value": "color {duration.fast} {easing.linear}, background-color {duration.fast} {easing.linear}" },
|
||||
"transform": { "$value": "transform {duration.normal} {easing.spring}" },
|
||||
"opacity": { "$value": "opacity {duration.fast} {easing.linear}" },
|
||||
"all-smooth": { "$value": "all {duration.slow} {easing.ease-in-out}" }
|
||||
},
|
||||
|
||||
"component_animations": {
|
||||
"/* PATTERN: Map each component to its animations - MUST match design-tokens.json component list */": {
|
||||
"stateOrInteraction": {
|
||||
"animation": "keyframe-name {duration.*} {easing.*} OR none",
|
||||
"transition": "{interactions.*} OR none"
|
||||
}
|
||||
},
|
||||
"button": {
|
||||
"hover": { "animation": "none", "transition": "{interactions.button-hover}" },
|
||||
"active": { "animation": "none", "transition": "{interactions.button-active}" }
|
||||
},
|
||||
"card": {
|
||||
"hover": { "animation": "none", "transition": "{interactions.card-hover}" }
|
||||
},
|
||||
"input": {
|
||||
"focus": { "animation": "none", "transition": "{interactions.input-focus}" }
|
||||
},
|
||||
"dialog": {
|
||||
"open": { "animation": "dialog-open {duration.normal} {easing.spring}" },
|
||||
"close": { "animation": "dialog-close {duration.normal} {easing.ease-in}" }
|
||||
},
|
||||
"dropdown": {
|
||||
"open": { "animation": "dropdown-open {duration.fast} {easing.ease-out}" },
|
||||
"close": { "animation": "dropdown-close {duration.fast} {easing.ease-in}" }
|
||||
},
|
||||
"toast": {
|
||||
"enter": { "animation": "toast-enter {duration.normal} {easing.ease-out}" },
|
||||
"exit": { "animation": "toast-exit {duration.normal} {easing.ease-in}" }
|
||||
},
|
||||
"accordion": {
|
||||
"open": { "animation": "accordion-down {duration.normal} {easing.ease-out}" },
|
||||
"close": { "animation": "accordion-up {duration.normal} {easing.ease-in}" }
|
||||
},
|
||||
"/* Add mappings for: tabs, switch, checkbox, badge, alert */" : {}
|
||||
},
|
||||
|
||||
"accessibility": {
|
||||
"prefers_reduced_motion": {
|
||||
"duration": "0ms",
|
||||
"keyframes": {},
|
||||
"note": "Disable animations when user prefers reduced motion",
|
||||
"css_rule": "@media (prefers-reduced-motion: reduce) { *, *::before, *::after { animation-duration: 0.01ms !important; animation-iteration-count: 1 !important; transition-duration: 0.01ms !important; } }"
|
||||
}
|
||||
},
|
||||
|
||||
"_metadata": {
|
||||
"version": "string",
|
||||
"created": "ISO timestamp",
|
||||
"source": "code-import|explore|text",
|
||||
"code_snippets": [
|
||||
{
|
||||
"animation_name": "string - keyframe/transition name",
|
||||
"source_file": "string - absolute path",
|
||||
"line_start": "number",
|
||||
"line_end": "number",
|
||||
"snippet": "string - complete @keyframes or transition code",
|
||||
"context_type": "css-keyframes|css-transition|js-animation|scss-animation|etc"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
**Structure Overview**:
- **duration**: instant (0ms), fast (150ms), normal (300ms), slow (500ms), slower (1000ms)
- **easing**: linear, ease-in, ease-out, ease-in-out, spring, bounce
- **keyframes**: Animation definitions in pairs (in/out, open/close, enter/exit)
  - Required: fade-in/out, slide-up/down, scale-in/out, accordion-down/up, dialog-open/close, dropdown-open/close, toast-enter/exit, spin, pulse
- **interactions**: Component interaction animations with property, duration, easing
  - button-hover/active, card-hover, input-focus, dropdown-toggle, accordion-toggle, dialog-toggle, tabs-switch
- **transitions**: default, colors, transform, opacity, all-smooth
- **component_animations**: Maps components to animations (MUST match design-tokens.json components)
  - State-based: dialog, dropdown, toast, accordion (use keyframes)
  - Interaction: button, card, input, tabs (use transitions)
- **accessibility**: prefers_reduced_motion with CSS rule
- **_metadata**: version, created, source, code_snippets

**Field Rules**:
- $schema MUST reference W3C Design Tokens format specification
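
The tokens above are plain data; consumers resolve `$value` entries themselves. A minimal sketch of one such consumer, flattening the duration/easing groups into CSS custom properties (the function name and `--group-name` prefix are illustrative assumptions, not part of the template):

```javascript
// Flatten duration/easing tokens into CSS custom properties.
// Skips "$type" entries (plain strings) and only emits objects carrying "$value".
function tokensToCssVars(tokens) {
  const lines = [];
  for (const group of ['duration', 'easing']) {
    for (const [name, def] of Object.entries(tokens[group] || {})) {
      if (def && typeof def === 'object' && '$value' in def) {
        lines.push(`--${group}-${name}: ${def['$value']};`);
      }
    }
  }
  return `:root {\n  ${lines.join('\n  ')}\n}`;
}
// e.g. tokensToCssVars(animationTokens) → ":root { --duration-fast: 150ms; ... }"
```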

@@ -120,7 +120,11 @@ Before completing any task, verify:
- Make assumptions - verify with existing materials
- Skip quality verification steps

**Bash Tool**:
- Use `run_in_background=false` for all Bash/CLI calls to ensure foreground execution

**ALWAYS:**
- **Search Tool Priority**: ACE (`mcp__ace-tool__search_context`) → CCW (`mcp__ccw-tools__smart_search`) / Built-in (`Grep`, `Glob`, `Read`)
- Verify resource/dependency existence before referencing
- Execute tasks systematically and incrementally
- Test and validate work thoroughly

18 .claude/cli-settings.json Normal file
@@ -0,0 +1,18 @@
{
  "version": "1.0.0",
  "defaultTool": "gemini",
  "promptFormat": "plain",
  "smartContext": {
    "enabled": false,
    "maxFiles": 10
  },
  "nativeResume": true,
  "recursiveQuery": true,
  "cache": {
    "injectionMode": "auto",
    "defaultPrefix": "",
    "defaultSuffix": ""
  },
  "codeIndexMcp": "ace",
  "$schema": "./cli-settings.schema.json"
}

1102 .claude/commands/ccw-coordinator.md Normal file
File diff suppressed because it is too large

832 .claude/commands/ccw-debug.md Normal file
@@ -0,0 +1,832 @@
---
name: ccw-debug
description: Aggregated debug command - combines debugging diagnostics and test verification in a synergistic workflow supporting cli-quick / debug-first / test-first / bidirectional-verification modes
argument-hint: "[--mode cli|debug|test|bidirectional] [--yes|-y] [--hotfix] \"bug description or error message\""
allowed-tools: SlashCommand(*), TodoWrite(*), AskUserQuestion(*), Read(*), Bash(*)
---

# CCW-Debug Aggregated Command

## Core Concept

**Aggregated Debug Command** - Combines debugging diagnostics and test verification in a synergistic workflow. Not a simple concatenation of two commands, but intelligent orchestration based on mode selection.

### Four Execution Modes

| Mode | Workflow | Use Case | Characteristics |
|------|----------|----------|-----------------|
| **CLI Quick** (cli) | Direct CLI Analysis → Fix Suggestions | Simple issues, quick diagnosis | Fastest, minimal workflow, recommendation-only |
| **Debug First** (debug) | Debug → Analyze Hypotheses → Apply Fix → Test Verification | Root cause unclear, requires exploration | Starts with exploration, Gemini-assisted |
| **Test First** (test) | Generate Tests → Execute → Analyze Failures → CLI Fixes | Code implemented, needs test validation | Driven by test coverage, auto-iterates |
| **Bidirectional Verification** (bidirectional) | Parallel: Debug + Test → Merge Findings → Unified Fix | Complex systems, ambiguous symptoms | Parallel execution, converged insights |

---

## Quick Start

### Basic Usage

```bash
# CLI quick mode: fastest, recommendation-only (new!)
/ccw-debug --mode cli "Login failed: token validation error"

# Default mode: debug-first (recommended for most scenarios)
/ccw-debug "Login failed: token validation error"

# Test-first mode
/ccw-debug --mode test "User permission check failure"

# Bidirectional verification mode (complex issues)
/ccw-debug --mode bidirectional "Payment flow multiple failures"

# Auto mode (skip all confirmations)
/ccw-debug --yes "Quick fix: database connection timeout"

# Production hotfix (minimal diagnostics)
/ccw-debug --hotfix --yes "Production: API returns 500"
```

### Mode Selection Guide

**Choose "CLI Quick"** when:
- Need immediate diagnosis, not execution
- Want quick recommendations without workflows
- Simple issues with clear symptoms
- Just need fix suggestions, no auto-application
- Time is critical, prefer fast output
- Want to review CLI analysis before action

**Choose "Debug First"** when:
- Root cause is unclear
- Error messages are incomplete or vague
- Need to understand code execution flow
- Issues involve multi-module interactions

**Choose "Test First"** when:
- Code is fully implemented
- Need test coverage verification
- Have clear failure cases
- Want automated iterative fixes

**Choose "Bidirectional Verification"** when:
- System is complex (multiple subsystems)
- Problem symptoms are ambiguous (multiple possible root causes)
- Need multi-angle validation
- Time allows parallel analysis

---

## Execution Flow

### Overall Process

```
Phase 1: Intent Analysis & Mode Selection
  ├─ Parse --mode flag or recommend mode
  ├─ Check --hotfix and --yes flags
  └─ Determine workflow path

Phase 2: Initialization
  ├─ CLI Quick: Lightweight init (no session directory needed)
  ├─ Others: Create unified session directory (.workflow/.ccw-debug/)
  ├─ Setup TodoWrite tracking
  └─ Prepare session context

Phase 3: Execute Corresponding Workflow
  ├─ CLI Quick: ccw cli → Diagnosis Report → Optional: Escalate to debug/test/apply fix
  ├─ Debug First: /workflow:debug-with-file → Fix → /workflow:test-fix-gen → /workflow:test-cycle-execute
  ├─ Test First: /workflow:test-fix-gen → /workflow:test-cycle-execute → CLI analyze failures
  └─ Bidirectional: [/workflow:debug-with-file] ∥ [/workflow:test-fix-gen → test-cycle-execute]

Phase 4: Merge Findings (Bidirectional Mode) / Escalation Decision (CLI Mode)
  ├─ CLI Quick: Present results → Ask user: Apply fix? Escalate? Done?
  ├─ Bidirectional: Converge findings from both workflows
  ├─ Identify consistent and conflicting root cause analyses
  └─ Generate unified fix plan

Phase 5: Completion & Follow-up
  ├─ Generate summary report
  ├─ Provide next step recommendations
  └─ Optional: Expand to issues (testing/enhancement/refactoring/documentation)
```

---

## Workflow Details

### Mode 0: CLI Quick (Minimal Debug Method)

**Best For**: Fast recommendations without full workflow overhead

**Workflow**:
```
User Input → Quick Context Gather → ccw cli (Gemini/Qwen/Codex)
                     ↓
               Analysis Report
                     ↓
             Fix Recommendations
                     ↓
          Optional: User Decision
      ┌──────────────┼──────────────┐
      ↓              ↓              ↓
  Apply Fix    Escalate Mode      Done
               (debug/test)
```

**Execution Steps**:

1. **Lightweight Context Gather** (Phase 2)
```javascript
// No session directory needed for CLI mode
const tempContext = {
  bug_description: bug_description,
  timestamp: getUtc8ISOString(),
  mode: "cli"
}

// Quick context discovery (30s max)
// - Read error file if path provided
// - Extract error patterns from description
// - Identify likely affected files (basic grep)
```
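
A minimal sketch of that discovery step (illustrative only; the real command would use the Grep/Read tools rather than these regexes, and the helper name is an assumption):

```javascript
// Pull likely file paths and grep seeds out of the bug description.
function quickContextGather(bugDescription) {
  // File-looking tokens, optionally with a line number ("src/auth/login.ts:42")
  const files = [...bugDescription.matchAll(/[\w./-]+\.(?:ts|tsx|js|jsx|py|go|rs|java)(?::\d+)?/g)]
    .map(m => m[0]);
  // Quoted error strings and ALL_CAPS error codes as grep patterns
  const errorPatterns = [...bugDescription.matchAll(/"([^"]+)"|\b[A-Z][A-Z_]{3,}\b/g)]
    .map(m => m[1] || m[0]);
  return { contextFiles: [...new Set(files)], errorPatterns: [...new Set(errorPatterns)] };
}
```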

2. **Execute CLI Analysis** (Phase 3)
```bash
# Use ccw cli with bug diagnosis template
ccw cli -p "
PURPOSE: Quick bug diagnosis for immediate recommendations

TASK:
• Analyze bug symptoms: ${bug_description}
• Identify likely root cause
• Provide actionable fix recommendations (code snippets if possible)
• Assess fix confidence level

MODE: analysis

CONTEXT: ${contextFiles.length > 0 ? '@' + contextFiles.join(' @') : 'Bug description only'}

EXPECTED:
- Root cause hypothesis (1-2 sentences)
- Fix strategy (immediate/comprehensive/refactor)
- Code snippets or file modification suggestions
- Confidence level: High/Medium/Low
- Risk assessment

CONSTRAINTS: Quick analysis, 2-5 minutes max
" --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause
```

3. **Present Results** (Phase 4)
```
## CLI Quick Analysis Complete

**Issue**: [bug_description]
**Analysis Time**: [duration]
**Confidence**: [High/Medium/Low]

### Root Cause
[1-2 sentence hypothesis]

### Fix Strategy
[immediate_patch | comprehensive_fix | refactor]

### Recommended Changes

**File**: src/module/file.ts
```typescript
// Change line 45-50
- old code
+ new code
```

**Rationale**: [why this fix]
**Risk**: [Low/Medium/High] - [risk description]

### Confidence Assessment
- Analysis confidence: [percentage]
- Recommendation: [apply immediately | review first | escalate to full debug]
```

4. **User Decision** (Phase 5)
```javascript
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes && confidence === 'High') {
  // Auto-apply fix
  console.log('[--yes + High confidence] Auto-applying fix...')
  applyFixFromCLIRecommendation(cliOutput)
} else {
  // Ask user
  const decision = AskUserQuestion({
    questions: [{
      question: `CLI analysis complete (${confidence} confidence). What next?`,
      header: "Decision",
      multiSelect: false,
      options: [
        { label: "Apply Fix", description: "Apply recommended changes immediately" },
        { label: "Escalate to Debug", description: "Switch to debug-first for deeper analysis" },
        { label: "Escalate to Test", description: "Switch to test-first for validation" },
        { label: "Review Only", description: "Just review, no action" }
      ]
    }]
  })

  if (decision === "Apply Fix") {
    applyFixFromCLIRecommendation(cliOutput)
  } else if (decision === "Escalate to Debug") {
    // Re-invoke ccw-debug with --mode debug
    SlashCommand(command=`/ccw-debug --mode debug "${bug_description}"`)
  } else if (decision === "Escalate to Test") {
    // Re-invoke ccw-debug with --mode test
    SlashCommand(command=`/ccw-debug --mode test "${bug_description}"`)
  }
}
```

**Key Characteristics**:
- **Speed**: 2-5 minutes total (fastest mode)
- **Session**: No persistent session directory (lightweight)
- **Output**: Recommendation report only
- **Execution**: Optional, user-controlled
- **Escalation**: Can upgrade to full debug/test workflows

**Limitations**:
- No hypothesis iteration (single-shot analysis)
- No automatic test generation
- No instrumentation/logging
- Best for clear symptoms with localized fixes

---

### Mode 1: Debug First

**Best For**: Issues requiring root cause exploration

**Workflow**:
```
User Input → Session Init → /workflow:debug-with-file
                  ↓
  Generate understanding.md + hypotheses
                  ↓
  User reproduces issue, analyze logs
                  ↓
      Gemini validates hypotheses
                  ↓
            Apply fix code
                  ↓
       /workflow:test-fix-gen
                  ↓
    /workflow:test-cycle-execute
                  ↓
      Generate unified report
```

**Execution Steps**:

1. **Session Initialization** (Phase 2)
```javascript
const sessionId = `CCWD-${bugSlug}-${dateStr}`
const sessionFolder = `.workflow/.ccw-debug/${sessionId}`
bash(`mkdir -p ${sessionFolder}`)

// Record mode selection
const modeConfig = {
  mode: "debug",
  original_input: bug_description,
  timestamp: getUtc8ISOString(),
  flags: { hotfix, autoYes }
}
Write(`${sessionFolder}/mode-config.json`, JSON.stringify(modeConfig, null, 2))
```
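
`getUtc8ISOString()` is used here and in the other modes but never defined in this document. A minimal sketch, assuming it simply returns an ISO-8601 timestamp shifted to UTC+8:

```javascript
// Assumed behavior: current time expressed with a +08:00 offset.
function getUtc8ISOString() {
  const shifted = new Date(Date.now() + 8 * 60 * 60 * 1000);
  return shifted.toISOString().replace('Z', '+08:00');
}
```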

2. **Start Debug** (Phase 3)
```javascript
SlashCommand(command=`/workflow:debug-with-file "${bug_description}"`)

// Update TodoWrite
TodoWrite({
  todos: [
    { content: "Phase 1: Debug & Analysis", status: "completed" },
    { content: "Phase 2: Apply Fix from Debug Findings", status: "in_progress" },
    { content: "Phase 3: Generate & Execute Tests", status: "pending" },
    { content: "Phase 4: Generate Report", status: "pending" }
  ]
})
```

3. **Apply Fix** (Handled by debug command)

4. **Test Generation & Execution**
```javascript
// Auto-continue after debug command completes
SlashCommand(command=`/workflow:test-fix-gen "Test validation for: ${bug_description}"`)
SlashCommand(command="/workflow:test-cycle-execute")
```

5. **Generate Report** (Phase 5)
```
## Debug-First Workflow Completed

**Issue**: [bug_description]
**Mode**: Debug First
**Session**: [sessionId]

### Debug Phase Results
- Root Cause: [extracted from understanding.md]
- Hypothesis Confirmation: [from hypotheses.json]
- Fixes Applied: [list of modified files]

### Test Phase Results
- Tests Created: [test files generated by IMPL-001]
- Pass Rate: [final test pass rate]
- Iteration Count: [fix iterations]

### Key Findings
- [learning points from debugging]
- [coverage insights from testing]
```

---

### Mode 2: Test First

**Best For**: Implemented code needing test validation

**Workflow**:
```
User Input → Session Init → /workflow:test-fix-gen
                  ↓
  Generate test tasks (IMPL-001, IMPL-002)
                  ↓
    /workflow:test-cycle-execute
                  ↓
  Auto-iterate: Test → Analyze Failures → CLI Fix
                  ↓
        Until pass rate ≥ 95%
                  ↓
           Generate report
```

**Execution Steps**:

1. **Session Initialization** (Phase 2)
```javascript
const modeConfig = {
  mode: "test",
  original_input: bug_description,
  timestamp: getUtc8ISOString(),
  flags: { hotfix, autoYes }
}
```

2. **Generate Tests** (Phase 3)
```javascript
SlashCommand(command=`/workflow:test-fix-gen "${bug_description}"`)

// Update TodoWrite
TodoWrite({
  todos: [
    { content: "Phase 1: Generate Tests", status: "completed" },
    { content: "Phase 2: Execute & Fix Tests", status: "in_progress" },
    { content: "Phase 3: Final Validation", status: "pending" },
    { content: "Phase 4: Generate Report", status: "pending" }
  ]
})
```

3. **Execute & Iterate** (Phase 3 cont.)
```javascript
SlashCommand(command="/workflow:test-cycle-execute")

// test-cycle-execute handles:
// - Execute tests
// - Analyze failures
// - Generate fix tasks via CLI
// - Iterate fixes until pass
```

4. **Generate Report** (Phase 5)

---

### Mode 3: Bidirectional Verification

**Best For**: Complex systems, multi-dimensional analysis

**Workflow**:
```
User Input → Session Init → Parallel execution:
        ┌──────────────────────────────┐
        │                              │
        ↓                              ↓
/workflow:debug-with-file       /workflow:test-fix-gen
        │                              │
Generate hypotheses & understanding    Generate test tasks
        │                              │
        ↓                              ↓
Apply debug fixes               /workflow:test-cycle-execute
        │                              │
        └──────────────┬───────────────┘
                       ↓
           Phase 4: Merge Findings
           ├─ Converge root cause analyses
           ├─ Identify consistency (mutual validation)
           ├─ Identify conflicts (need coordination)
           └─ Generate unified report
```

**Execution Steps**:

1. **Parallel Execution** (Phase 3)
```javascript
// Start debug
const debugTask = SlashCommand(
  command=`/workflow:debug-with-file "${bug_description}"`,
  run_in_background=false
)

// Start test generation (synchronous execution, SlashCommand blocks)
const testTask = SlashCommand(
  command=`/workflow:test-fix-gen "${bug_description}"`,
  run_in_background=false
)

// Execute test cycle
const testCycleTask = SlashCommand(
  command="/workflow:test-cycle-execute",
  run_in_background=false
)
```

2. **Merge Findings** (Phase 4)
```javascript
// Read debug results
const understandingMd = Read(`${debugSessionFolder}/understanding.md`)
const hypothesesJson = JSON.parse(Read(`${debugSessionFolder}/hypotheses.json`))

// Read test results
const testResultsJson = JSON.parse(Read(`${testSessionFolder}/.process/test-results.json`))
const fixPlanJson = JSON.parse(Read(`${testSessionFolder}/.task/IMPL-002.json`))

// Merge analysis
const debugRootCause = hypothesesJson.confirmed_hypothesis
const testFailures = testResultsJson.failures

const convergence = {
  debug_root_cause: debugRootCause,
  test_failure_pattern: testFailures,
  consistency: analyzeConsistency(debugRootCause, testFailures),
  conflicts: identifyConflicts(debugRootCause, testFailures),
  unified_root_cause: mergeRootCauses(debugRootCause, testFailures),
  recommended_fix: selectBestFix(debugRootCause, testFailures)
}
```
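
`analyzeConsistency`, `identifyConflicts`, `mergeRootCauses`, and `selectBestFix` are left undefined above. Minimal sketches of the first two, under the assumption that the confirmed hypothesis lists `affected_files` and each test failure records a `file`:

```javascript
// Failures touching a file the debug hypothesis also implicates → agreement.
function analyzeConsistency(debugRootCause, testFailures) {
  const implicated = new Set(debugRootCause.affected_files || []);
  return testFailures.filter(f => implicated.has(f.file));
}

// Failures outside the hypothesis's file set → need coordination.
function identifyConflicts(debugRootCause, testFailures) {
  const implicated = new Set(debugRootCause.affected_files || []);
  return testFailures.filter(f => !implicated.has(f.file));
}
```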

3. **Generate Report** (Phase 5)
```
## Bidirectional Verification Workflow Completed

**Issue**: [bug_description]
**Mode**: Bidirectional Verification

### Debug Findings
- Root Cause (hypothesis): [from understanding.md]
- Confidence: [from hypotheses.json]
- Key code paths: [file:line]

### Test Findings
- Failure pattern: [list of failing tests]
- Error type: [error type]
- Impact scope: [affected modules]

### Merged Analysis
- ✓ Consistent: Both workflows identified same root cause
- ⚠ Conflicts: [list any conflicts]
- → Unified Root Cause: [final confirmed root cause]

### Recommended Fix
- Strategy: [selected fix strategy]
- Rationale: [why this strategy]
- Risks: [known risks]
```

---

## Command Line Interface

### Complete Syntax

```bash
/ccw-debug [OPTIONS] <BUG_DESCRIPTION>

Options:
  --mode <cli|debug|test|bidirectional>   Execution mode (default: debug)
  --yes, -y                               Auto mode (skip all confirmations)
  --hotfix, -h                            Production hotfix mode (only for debug mode)
  --no-tests                              Skip test generation in debug-first mode
  --skip-report                           Don't generate final report
  --resume <session-id>                   Resume interrupted session

Arguments:
  <BUG_DESCRIPTION>                       Issue description, error message, or .md file path
```

### Examples

```bash
# CLI quick mode: fastest, recommendation-only (NEW!)
/ccw-debug --mode cli "User login timeout"
/ccw-debug --mode cli --yes "Quick fix: API 500 error"  # Auto-apply if high confidence

# Debug first (default)
/ccw-debug "User login timeout"

# Test first
/ccw-debug --mode test "Payment validation failure"

# Bidirectional verification
/ccw-debug --mode bidirectional "Multi-module data consistency issue"

# Hotfix auto mode
/ccw-debug --hotfix --yes "API 500 error"

# Debug first, skip tests
/ccw-debug --no-tests "Understand code flow"

# Resume interrupted session
/ccw-debug --resume CCWD-login-timeout-2025-01-27
```

---

## Session Structure

### File Organization

```
.workflow/.ccw-debug/CCWD-{slug}-{date}/
├── mode-config.json          # Mode configuration and flags
├── session-manifest.json     # Session index and status
├── final-report.md           # Final report
│
├── debug/                    # Debug workflow (if mode includes debug)
│   ├── debug-session-id.txt
│   ├── understanding.md
│   ├── hypotheses.json
│   └── debug.log
│
├── test/                     # Test workflow (if mode includes test)
│   ├── test-session-id.txt
│   ├── IMPL_PLAN.md
│   ├── test-results.json
│   └── iteration-state.json
│
└── fusion/                   # Fusion analysis (bidirectional mode)
    ├── convergence-analysis.json
    ├── consistency-report.md
    └── unified-root-cause.json
```

### Session State Management

```json
{
  "session_id": "CCWD-login-timeout-2025-01-27",
  "mode": "debug|test|bidirectional",
  "status": "running|completed|failed|paused",
  "phases": {
    "phase_1": { "status": "completed", "timestamp": "..." },
    "phase_2": { "status": "in_progress", "timestamp": "..." },
    "phase_3": { "status": "pending" },
    "phase_4": { "status": "pending" },
    "phase_5": { "status": "pending" }
  },
  "sub_sessions": {
    "debug_session": "DBG-...",
    "test_session": "WFS-test-..."
  },
  "artifacts": {
    "debug_understanding": "...",
    "test_results": "...",
    "fusion_analysis": "..."
  }
}
```
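
The manifest is advanced phase by phase. A minimal sketch of that update, reusing the `Read`/`Write` pseudo-tools already used in this document (the helper name is an assumption):

```javascript
// Mark one phase completed in session-manifest.json.
function advancePhase(sessionFolder, phase) {
  const path = `${sessionFolder}/session-manifest.json`;
  const manifest = JSON.parse(Read(path));
  manifest.phases[phase] = { status: 'completed', timestamp: new Date().toISOString() };
  Write(path, JSON.stringify(manifest, null, 2));
}
// e.g. advancePhase('.workflow/.ccw-debug/CCWD-login-timeout-2025-01-27', 'phase_2')
```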

---

## Mode Selection Logic

### Auto Mode Recommendation

When the user doesn't specify `--mode`, recommend a mode based on input analysis:

```javascript
function recommendMode(bugDescription) {
  const indicators = {
    cli_signals: [
      /quick|fast|simple|immediate/,
      /recommendation|suggest|advice/,
      /just need|only want|quick look/,
      /straightforward|obvious|clear/
    ],
    debug_signals: [
      /unclear|don't know|maybe|uncertain|why/,
      /error|crash|fail|exception|stack trace/,
      /execution flow|code path|how does/
    ],
    test_signals: [
      /test|coverage|verify|pass|fail/,
      /implementation|implemented|complete/,
      /case|scenario|should/
    ],
    complex_signals: [
      /multiple|all|system|integration/,
      /module|subsystem|network|distributed/,
      /concurrent|async|race/
    ]
  }

  let score = { cli: 0, debug: 0, test: 0, bidirectional: 0 }

  // CLI signals (lightweight preference)
  for (const pattern of indicators.cli_signals) {
    if (pattern.test(bugDescription)) score.cli += 3
  }

  // Debug signals
  for (const pattern of indicators.debug_signals) {
    if (pattern.test(bugDescription)) score.debug += 2
  }

  // Test signals
  for (const pattern of indicators.test_signals) {
    if (pattern.test(bugDescription)) score.test += 2
  }

  // Complex signals (prefer bidirectional for complex issues)
  for (const pattern of indicators.complex_signals) {
    if (pattern.test(bugDescription)) {
      score.bidirectional += 3
      score.cli -= 2  // Complex issues not suitable for CLI quick
    }
  }

  // If description is short and has clear error, prefer CLI
  if (bugDescription.length < 100 && /error|fail|crash/.test(bugDescription)) {
    score.cli += 2
  }

  // Return highest scoring mode
  return Object.entries(score).sort((a, b) => b[1] - a[1])[0][0]
}
```
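
Hypothetical inputs showing the scoring in action:

```javascript
recommendMode("quick look: obvious null error in login")        // → 'cli' (three cli signals plus the short-error bonus)
recommendMode("unclear why the exception happens in checkout")  // → 'debug' (two debug signals outweigh cli)
recommendMode("multiple modules fail, async race in system")    // → 'bidirectional' (complex signals, cli penalized)
```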

---

## Best Practices

### When to Use Each Mode

| Issue Characteristic | Recommended Mode | Rationale |
|----------------------|-----------------|-----------|
| Simple error, clear symptoms | CLI Quick | Fastest recommendation |
| Incomplete error info, requires exploration | Debug First | Deep diagnostic capability |
| Code complete, needs test coverage | Test First | Automated iterative fixes |
| Cross-module issue, ambiguous symptoms | Bidirectional | Multi-angle insights |
| Production failure, needs immediate guidance | CLI Quick + --yes | Fastest guidance, optional escalation |
| Production failure, needs safe fix | Debug First + --hotfix | Minimal diagnosis time |
| Want to understand why it failed | Debug First | Records understanding evolution |
| Want to ensure all scenarios pass | Test First | Complete coverage-driven |

### Performance Tips

- **CLI Quick**: 2-5 minutes, no file I/O, recommendation-only
- **Debug First**: Usually requires manual issue reproduction (after logging is added), then 15-30 min
- **Test First**: Fully automated, 20-45 min depending on test suite size
- **Bidirectional**: Most comprehensive but slowest (parallel workflows), 30-60 min

### Workflow Continuity

- **CLI Quick**: Can escalate to debug/test/apply fix based on user decision
- **Debug First**: Auto-launches test generation and execution after completion
- **Test First**: When failure rates are high, suggests switching to debug mode for root-cause analysis
- **Bidirectional**: Always executes the complete flow

---

## Follow-up Expansion

After completion, offer to expand the work into follow-up issues:

```
## Done! What's next?

- [ ] Create Test issue (improve test coverage)
- [ ] Create Enhancement issue (optimize code quality)
- [ ] Create Refactor issue (improve architecture)
- [ ] Create Documentation issue (record learnings)
- [ ] Don't create any issue, end workflow
```

Each selected item calls: `/issue:new "{issue summary} - {dimension}"`

---

## Error Handling

| Error | CLI Quick | Debug First | Test First | Bidirectional |
|-------|-----------|-------------|-----------|---------------|
| Session creation failed | N/A (no session) | Retry → abort | Retry → abort | Retry → abort |
| CLI analysis failed | Retry with fallback tool → manual | N/A | N/A | N/A |
| Diagnosis/test failed | N/A | Continue with partial results | Direct failure | Use alternate workflow results |
| Low confidence result | Ask escalate or review | N/A | N/A | N/A |
| Merge conflicts | N/A | N/A | N/A | Select highest confidence plan |
| Fix application failed | Report error, no auto-retry | Request manual fix | Mark failed, request intervention | Try alternative plan |

---

## Relationship with ccw Command

| Feature | ccw | ccw-debug |
|---------|-----|----------|
| **Design** | General workflow orchestration | Debug + test aggregation |
| **Intent Detection** | ✅ Detects task type | ✅ Detects issue type |
| **Automation** | ✅ Auto-selects workflow | ✅ Auto-selects mode |
| **Quick Mode** | ❌ None | ✅ CLI Quick (2-5 min) |
| **Parallel Execution** | ❌ Sequential | ✅ Bidirectional mode parallel |
| **Fusion Analysis** | ❌ None | ✅ Bidirectional mode fusion |
| **Workflow Scope** | Broad (feature/bugfix/tdd/ui etc.) | Deep focus (debug + test) |
| **CLI Integration** | Yes | Yes (4 levels: quick/deep/iterative/fusion) |

---

## Usage Recommendations

1. **First Time**: Use default mode (debug-first), observe workflow
2. **Quick Decision**: Use CLI Quick (--mode cli) for immediate recommendations
3. **Quick Fix**: Use `--hotfix --yes` for minimal diagnostics (debug mode)
4. **Learning**: Use debug-first, read `understanding.md`
5. **Complete Validation**: Use bidirectional for multi-dimensional insights
6. **Auto Repair**: Use test-first for automatic iteration
7. **Escalation**: Start with CLI Quick, escalate to other modes as needed

---

## Reference

### Related Commands

- `ccw cli` - Direct CLI analysis (used by CLI Quick mode)
- `/workflow:debug-with-file` - Deep debug diagnostics
- `/workflow:test-fix-gen` - Test generation
- `/workflow:test-cycle-execute` - Test execution
- `/workflow:lite-fix` - Lightweight fix
- `/ccw` - General workflow orchestration

### Configuration Files

- `~/.claude/cli-tools.json` - CLI tool configuration (Gemini/Qwen/Codex)
- `.workflow/project-tech.json` - Project technology stack
- `.workflow/project-guidelines.json` - Project conventions

### CLI Tool Fallback Chain (for CLI Quick mode)

When CLI analysis fails, the fallback order is:
1. **Gemini** (primary): `gemini-2.5-pro`
2. **Qwen** (fallback): `coder-model`
3. **Codex** (fallback): `gpt-5.2`

---

## Summary: Mode Selection Decision Tree

```
User calls: /ccw-debug <bug_description>

┌─ Explicit --mode specified?
│   ├─ YES → Use specified mode
│   │   ├─ cli → 2-5 min analysis, optionally escalate
│   │   ├─ debug → Full debug-with-file workflow
│   │   ├─ test → Test-first workflow
│   │   └─ bidirectional → Parallel debug + test
│   │
│   └─ NO → Auto-recommend based on bug description
│       ├─ Keywords: "quick", "fast", "simple" → CLI Quick
│       ├─ Keywords: "error", "crash", "exception" → Debug First (or CLI if simple)
│       ├─ Keywords: "test", "verify", "coverage" → Test First
│       └─ Keywords: "multiple", "system", "distributed" → Bidirectional
│
├─ Check --yes flag
│   ├─ YES → Auto-confirm all decisions
│   │   ├─ CLI mode: Auto-apply if confidence High
│   │   └─ Others: Auto-select default options
│   │
│   └─ NO → Interactive mode, ask user for confirmations
│
├─ Check --hotfix flag (debug mode only)
│   ├─ YES → Minimal diagnostics, fast fix
│   └─ NO → Full analysis
│
└─ Execute selected mode workflow
    └─ Return results or escalation options
```

567 .claude/commands/ccw.md Normal file
@@ -0,0 +1,567 @@
---
name: ccw
description: Main workflow orchestrator - analyze intent, select workflow, execute command chain in main process
argument-hint: "\"task description\""
allowed-tools: SlashCommand(*), TodoWrite(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*)
---

# CCW Command - Main Workflow Orchestrator

Main process orchestrator: intent analysis → workflow selection → command chain execution.

## Core Concept: Minimum Execution Units

**Definition**: A set of commands that must execute together as an atomic group to achieve a meaningful workflow milestone.

**Why This Matters**:
- **Prevents Incomplete States**: Avoid stopping after task generation without execution
- **User Experience**: User gets complete results, not intermediate artifacts requiring manual follow-up
- **Workflow Integrity**: Maintains logical coherence of multi-step operations

**Key Units in CCW**:

| Unit Type | Pattern | Example |
|-----------|---------|---------|
| **Planning + Execution** | plan-cmd → execute-cmd | lite-plan → lite-execute |
| **Testing** | test-gen-cmd → test-exec-cmd | test-fix-gen → test-cycle-execute |
| **Review** | review-cmd → fix-cmd | review-session-cycle → review-cycle-fix |

**Atomic Rules**:
1. CCW automatically groups commands into minimum units - never splits them (see the sketch after this list)
2. Pipeline visualization shows units with `【 】` markers
3. Error handling preserves unit boundaries (retry/skip affects whole unit)
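
A minimal sketch of that grouping (assumption: chain steps carry the `unit` field shown in Phase 2 below; consecutive steps sharing a unit form one atomic group):

```javascript
// Group a command chain into minimum execution units.
function groupIntoUnits(chain) {
  const units = [];
  for (const step of chain) {
    const last = units[units.length - 1];
    if (last && step.unit && last.unit === step.unit) {
      last.steps.push(step); // same unit → extend the atomic group
    } else {
      units.push({ unit: step.unit || step.cmd, steps: [step] });
    }
  }
  return units; // retry/skip decisions then apply per unit, not per step
}
```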

## Execution Model

**Synchronous (Main Process)**: Commands execute via SlashCommand in main process, blocking until complete.

```
User Input → Analyze Intent → Select Workflow → [Confirm] → Execute Chain
                                                     ↓
                                          SlashCommand (blocking)
                                                     ↓
                                            Update TodoWrite
                                                     ↓
                                             Next Command...
```

**vs ccw-coordinator**: External CLI execution with background tasks and hook callbacks.

## 5-Phase Workflow

### Phase 1: Analyze Intent

```javascript
function analyzeIntent(input) {
  return {
    goal: extractGoal(input),
    scope: extractScope(input),
    constraints: extractConstraints(input),
    task_type: detectTaskType(input),       // bugfix|feature|tdd|review|exploration|...
    complexity: assessComplexity(input),    // low|medium|high
    clarity_score: calculateClarity(input)  // 0-3 (>=2 = clear)
  };
}

// Task type detection (priority order)
// Composite types use lookaheads so both keyword groups must appear;
// a bare `/a/ && /b/` expression would only ever test the second regex.
function detectTaskType(text) {
  const patterns = {
    'bugfix-hotfix': /(?=.*(?:urgent|production|critical))(?=.*(?:fix|bug))/,
    // With-File workflows (documented exploration with multi-CLI collaboration)
    'brainstorm': /brainstorm|ideation|头脑风暴|创意|发散思维|creative thinking|multi-perspective.*think|compare perspectives|探索.*可能/,
    'brainstorm-to-issue': /brainstorm.*issue|头脑风暴.*issue|idea.*issue|想法.*issue|从.*头脑风暴|convert.*brainstorm/,
    'debug-file': /debug.*document|hypothesis.*debug|troubleshoot.*track|investigate.*log|调试.*记录|假设.*验证|systematic debug|深度调试/,
    'analyze-file': /analyze.*document|explore.*concept|understand.*architecture|investigate.*discuss|collaborative analysis|分析.*讨论|深度.*理解|协作.*分析/,
    // Standard workflows
    'bugfix': /fix|bug|error|crash|fail|debug/,
    'issue-batch': /(?=.*(?:issues?|batch))(?=.*(?:fix|resolve))/,
    'issue-transition': /issue workflow|structured workflow|queue|multi-stage/,
    'exploration': /uncertain|explore|research|what if/,
    'quick-task': /(?=.*(?:quick|simple|small))(?=.*(?:feature|function))/,
    'ui-design': /ui|design|component|style/,
    'tdd': /tdd|test-driven|test first/,
    'test-fix': /test fail|fix test|failing test/,
    'review': /review|code review/,
    'documentation': /docs|documentation|readme/
  };
  for (const [type, pattern] of Object.entries(patterns)) {
    if (pattern.test(text)) return type;
  }
  return 'feature';
}
```
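
Hypothetical inputs (the composite patterns require both keyword groups to appear):

```javascript
detectTaskType("urgent production fix: login bug")  // → 'bugfix-hotfix'
detectTaskType("resolve the open issues in batch")  // → 'issue-batch'
detectTaskType("add dark-mode support")             // → 'feature' (fallback)
```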

**Output**: `Type: [task_type] | Goal: [goal] | Complexity: [complexity] | Clarity: [clarity_score]/3`

---

### Phase 1.5: Requirement Clarification (if clarity_score < 2)

```javascript
async function clarifyRequirements(analysis) {
  if (analysis.clarity_score >= 2) return analysis;

  const questions = generateClarificationQuestions(analysis); // Goal, Scope, Constraints
  const answers = await AskUserQuestion({ questions });
  return updateAnalysis(analysis, answers);
}
```

**Questions**: Goal (Create/Fix/Optimize/Analyze), Scope (Single file/Module/Cross-module/System), Constraints (Backward compat/Skip tests/Urgent hotfix)

---

### Phase 2: Select Workflow & Build Command Chain

```javascript
function selectWorkflow(analysis) {
  const levelMap = {
    'bugfix-hotfix': { level: 2, flow: 'bugfix.hotfix' },
    // With-File workflows (documented exploration with multi-CLI collaboration)
    'brainstorm': { level: 4, flow: 'brainstorm-with-file' },         // Multi-perspective ideation
    'brainstorm-to-issue': { level: 4, flow: 'brainstorm-to-issue' }, // Brainstorm → Issue workflow
    'debug-file': { level: 3, flow: 'debug-with-file' },              // Hypothesis-driven debugging
    'analyze-file': { level: 3, flow: 'analyze-with-file' },          // Collaborative analysis
    // Standard workflows
    'bugfix': { level: 2, flow: 'bugfix.standard' },
    'issue-batch': { level: 'Issue', flow: 'issue' },
    'issue-transition': { level: 2.5, flow: 'rapid-to-issue' }, // Bridge workflow
    'exploration': { level: 4, flow: 'full' },
    'quick-task': { level: 1, flow: 'lite-lite-lite' },
    'ui-design': { level: analysis.complexity === 'high' ? 4 : 3, flow: 'ui' },
    'tdd': { level: 3, flow: 'tdd' },
    'test-fix': { level: 3, flow: 'test-fix-gen' },
    'review': { level: 3, flow: 'review-cycle-fix' },
    'documentation': { level: 2, flow: 'docs' },
    'feature': { level: analysis.complexity === 'high' ? 3 : 2, flow: analysis.complexity === 'high' ? 'coupled' : 'rapid' }
  };

  const selected = levelMap[analysis.task_type] || levelMap['feature'];
  return buildCommandChain(selected, analysis);
}

// Build command chain (port-based matching with Minimum Execution Units)
function buildCommandChain(workflow, analysis) {
  const chains = {
    // Level 1 - Rapid
    'lite-lite-lite': [
      { cmd: '/workflow:lite-lite-lite', args: `"${analysis.goal}"` }
    ],

    // Level 2 - Lightweight
    'rapid': [
      // Unit: Quick Implementation【lite-plan → lite-execute】
      { cmd: '/workflow:lite-plan', args: `"${analysis.goal}"`, unit: 'quick-impl' },
      { cmd: '/workflow:lite-execute', args: '--in-memory', unit: 'quick-impl' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      ...(analysis.constraints?.includes('skip-tests') ? [] : [
        { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
        { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
      ])
    ],

    // Level 2 Bridge - Lightweight to Issue Workflow
    'rapid-to-issue': [
      // Unit: Quick Implementation【lite-plan → convert-to-plan】
      { cmd: '/workflow:lite-plan', args: `"${analysis.goal}"`, unit: 'quick-impl-to-issue' },
      { cmd: '/issue:convert-to-plan', args: '--latest-lite-plan -y', unit: 'quick-impl-to-issue' },
      // Auto-continue to issue workflow
      { cmd: '/issue:queue', args: '' },
      { cmd: '/issue:execute', args: '--queue auto' }
    ],

    'bugfix.standard': [
      // Unit: Bug Fix【lite-fix → lite-execute】
      { cmd: '/workflow:lite-fix', args: `"${analysis.goal}"`, unit: 'bug-fix' },
      { cmd: '/workflow:lite-execute', args: '--in-memory', unit: 'bug-fix' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      ...(analysis.constraints?.includes('skip-tests') ? [] : [
        { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
        { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
      ])
    ],

    'bugfix.hotfix': [
      { cmd: '/workflow:lite-fix', args: `--hotfix "${analysis.goal}"` }
    ],

    'multi-cli-plan': [
      // Unit: Multi-CLI Planning【multi-cli-plan → lite-execute】
      { cmd: '/workflow:multi-cli-plan', args: `"${analysis.goal}"`, unit: 'multi-cli' },
      { cmd: '/workflow:lite-execute', args: '--in-memory', unit: 'multi-cli' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      ...(analysis.constraints?.includes('skip-tests') ? [] : [
        { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
        { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
      ])
    ],

    'docs': [
      // Unit: Quick Implementation【lite-plan → lite-execute】
      { cmd: '/workflow:lite-plan', args: `"${analysis.goal}"`, unit: 'quick-impl' },
      { cmd: '/workflow:lite-execute', args: '--in-memory', unit: 'quick-impl' }
    ],

    // With-File workflows (documented exploration with multi-CLI collaboration)
    'brainstorm-with-file': [
      { cmd: '/workflow:brainstorm-with-file', args: `"${analysis.goal}"` }
      // Note: Has built-in post-completion options (create plan, create issue, deep analysis)
    ],

    // Brainstorm-to-Issue workflow (bridge from brainstorm to issue execution)
    'brainstorm-to-issue': [
      // Note: Assumes brainstorm session already exists, or run brainstorm first
      { cmd: '/issue:from-brainstorm', args: `SESSION="${extractBrainstormSession(analysis)}" --auto` },
      { cmd: '/issue:queue', args: '' },
      { cmd: '/issue:execute', args: '--queue auto' }
    ],

    'debug-with-file': [
      { cmd: '/workflow:debug-with-file', args: `"${analysis.goal}"` }
      // Note: Self-contained with hypothesis-driven iteration and Gemini validation
    ],

    'analyze-with-file': [
      { cmd: '/workflow:analyze-with-file', args: `"${analysis.goal}"` }
      // Note: Self-contained with multi-round discussion and CLI exploration
    ],

    // Level 3 - Standard
    'coupled': [
      // Unit: Verified Planning【plan → plan-verify】
      { cmd: '/workflow:plan', args: `"${analysis.goal}"`, unit: 'verified-planning' },
      { cmd: '/workflow:plan-verify', args: '', unit: 'verified-planning' },
      // Execution
      { cmd: '/workflow:execute', args: '' },
      // Unit: Code Review【review-session-cycle → review-cycle-fix】
      { cmd: '/workflow:review-session-cycle', args: '', unit: 'code-review' },
      { cmd: '/workflow:review-cycle-fix', args: '', unit: 'code-review' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      ...(analysis.constraints?.includes('skip-tests') ? [] : [
        { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
        { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
      ])
    ],

    'tdd': [
      // Unit: TDD Planning + Execution【tdd-plan → execute】
      { cmd: '/workflow:tdd-plan', args: `"${analysis.goal}"`, unit: 'tdd-planning' },
      { cmd: '/workflow:execute', args: '', unit: 'tdd-planning' },
      // TDD Verification
      { cmd: '/workflow:tdd-verify', args: '' }
    ],

    'test-fix-gen': [
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      { cmd: '/workflow:test-fix-gen', args: `"${analysis.goal}"`, unit: 'test-validation' },
      { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
    ],

    'review-cycle-fix': [
      // Unit: Code Review【review-session-cycle → review-cycle-fix】
      { cmd: '/workflow:review-session-cycle', args: '', unit: 'code-review' },
      { cmd: '/workflow:review-cycle-fix', args: '', unit: 'code-review' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
      { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
    ],

    'ui': [
      { cmd: '/workflow:ui-design:explore-auto', args: `"${analysis.goal}"` },
      // Unit: Planning + Execution【plan → execute】
      { cmd: '/workflow:plan', args: '', unit: 'plan-execute' },
      { cmd: '/workflow:execute', args: '', unit: 'plan-execute' }
    ],

    // Level 4 - Brainstorm
    'full': [
      { cmd: '/workflow:brainstorm:auto-parallel', args: `"${analysis.goal}"` },
      // Unit: Verified Planning【plan → plan-verify】
      { cmd: '/workflow:plan', args: '', unit: 'verified-planning' },
      { cmd: '/workflow:plan-verify', args: '', unit: 'verified-planning' },
      // Execution
      { cmd: '/workflow:execute', args: '' },
      // Unit: Test Validation【test-fix-gen → test-cycle-execute】
      { cmd: '/workflow:test-fix-gen', args: '', unit: 'test-validation' },
      { cmd: '/workflow:test-cycle-execute', args: '', unit: 'test-validation' }
    ],

    // Issue Workflow
    'issue': [
      { cmd: '/issue:discover', args: '' },
      { cmd: '/issue:plan', args: '--all-pending' },
      { cmd: '/issue:queue', args: '' },
      { cmd: '/issue:execute', args: '' }
    ]
  };

  return chains[workflow.flow] || chains['rapid'];
}
```
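
A hypothetical walk-through of the two functions above: a low-complexity feature maps to Level 2 `rapid`, which expands into two minimum execution units:

```javascript
const chain = selectWorkflow({
  task_type: 'feature', complexity: 'low',
  goal: 'Add API endpoint', constraints: []
})
// →【lite-plan → lite-execute】→【test-fix-gen → test-cycle-execute】
```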

**Output**: `Level [X] - [flow] | Pipeline: [...] | Commands: [1. /cmd1 2. /cmd2 ...]`

---

### Phase 3: User Confirmation

```javascript
async function getUserConfirmation(chain) {
  const response = await AskUserQuestion({
    questions: [{
      question: "Execute this command chain?",
      header: "Confirm",
      options: [
        { label: "Confirm", description: "Start" },
        { label: "Adjust", description: "Modify" },
        { label: "Cancel", description: "Abort" }
      ]
    }]
  });

  // The answer is keyed by the question header ("Confirm"), matching handleError below
  if (response.Confirm === "Cancel") throw new Error("Cancelled");
  if (response.Confirm === "Adjust") return await adjustChain(chain);
  return chain;
}
```

---

### Phase 4: Setup TODO Tracking

```javascript
function setupTodoTracking(chain, workflow) {
  const todos = chain.map((step, i) => ({
    content: `CCW:${workflow}: [${i + 1}/${chain.length}] ${step.cmd}`,
    status: i === 0 ? 'in_progress' : 'pending',
    activeForm: `Executing ${step.cmd}`
  }));
  TodoWrite({ todos });
}
```

**Output**: `-> CCW:rapid: [1/3] /workflow:lite-plan | CCW:rapid: [2/3] /workflow:lite-execute | ...`

---

### Phase 5: Execute Command Chain

```javascript
async function executeCommandChain(chain, workflow) {
  let previousResult = null;

  for (let i = 0; i < chain.length; i++) {
    try {
      const fullCommand = assembleCommand(chain[i], previousResult);
      const result = await SlashCommand({ command: fullCommand });

      previousResult = { ...result, success: true };
      updateTodoStatus(i, chain.length, workflow, 'completed');

    } catch (error) {
      const action = await handleError(chain[i], error, i);
      if (action === 'retry') {
        i--; // Retry
      } else if (action === 'abort') {
        return { success: false, error: error.message };
      }
      // 'skip' - continue
    }
  }

  return { success: true, completed: chain.length };
}

// Assemble full command with session/plan parameters
function assembleCommand(step, previousResult) {
  let command = step.cmd;
  if (step.args) {
    command += ` ${step.args}`;
  } else if (previousResult?.session_id) {
    command += ` --session="${previousResult.session_id}"`;
  }
  return command;
}

// Update TODO: mark current as complete, next as in-progress
function updateTodoStatus(index, total, workflow, status) {
  const todos = getAllCurrentTodos();
  const updated = todos.map(todo => {
    if (todo.content.startsWith(`CCW:${workflow}:`)) {
      const stepNum = extractStepIndex(todo.content);
      if (stepNum === index + 1) return { ...todo, status };
      if (stepNum === index + 2 && status === 'completed') return { ...todo, status: 'in_progress' };
    }
    return todo;
  });
  TodoWrite({ todos: updated });
}

// Error handling: Retry/Skip/Abort
async function handleError(step, error, index) {
  const response = await AskUserQuestion({
    questions: [{
      question: `${step.cmd} failed: ${error.message}`,
      header: "Error",
      options: [
        { label: "Retry", description: "Re-execute" },
        { label: "Skip", description: "Continue next" },
        { label: "Abort", description: "Stop" }
      ]
    }]
  });
  return { Retry: 'retry', Skip: 'skip', Abort: 'abort' }[response.Error] || 'abort';
}
```
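
`getAllCurrentTodos()` and `extractStepIndex()` are referenced above but not defined in this document. Minimal sketches, assuming todos are mirrored in a local array and the step index is parsed from the `[n/m]` marker in each todo's content:

```javascript
let currentTodos = []; // kept in sync with each TodoWrite call (assumption)

function getAllCurrentTodos() {
  return currentTodos;
}

// "CCW:rapid: [2/3] /workflow:lite-execute" → 2
function extractStepIndex(content) {
  const match = content.match(/\[(\d+)\/\d+\]/);
  return match ? Number(match[1]) : -1;
}
```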

---

## Execution Flow Summary

```
User Input
    |
Phase 1: Analyze Intent
    |-- Extract: goal, scope, constraints, task_type, complexity, clarity
    +-- If clarity < 2 -> Phase 1.5: Clarify Requirements
    |
Phase 2: Select Workflow & Build Chain
    |-- Map task_type -> Level (1/2/3/4/Issue)
    |-- Select flow based on complexity
    +-- Build command chain (port-based)
    |
Phase 3: User Confirmation (optional)
    |-- Show pipeline visualization
    +-- Allow adjustment
    |
Phase 4: Setup TODO Tracking
    +-- Create todos with CCW prefix
    |
Phase 5: Execute Command Chain
    |-- For each command:
    |     |-- Assemble full command
    |     |-- Execute via SlashCommand
    |     |-- Update TODO status
    |     +-- Handle errors (retry/skip/abort)
    +-- Return workflow result
```

---

## Pipeline Examples (with Minimum Execution Units)

**Note**: `【 】` marks Minimum Execution Units - commands execute together as atomic groups.

| Input | Type | Level | Pipeline (with Units) |
|-------|------|-------|-----------------------|
| "Add API endpoint" | feature (low) | 2 |【lite-plan → lite-execute】→【test-fix-gen → test-cycle-execute】|
| "Fix login timeout" | bugfix | 2 |【lite-fix → lite-execute】→【test-fix-gen → test-cycle-execute】|
| "Use issue workflow" | issue-transition | 2.5 |【lite-plan → convert-to-plan】→ queue → execute |
| "头脑风暴: 通知系统重构" (brainstorm: notification system refactor) | brainstorm | 4 | brainstorm-with-file → (built-in post-completion) |
| "从头脑风暴创建 issue" (create issues from a brainstorm) | brainstorm-to-issue | 4 | from-brainstorm → queue → execute |
| "深度调试 WebSocket 连接断开" (deep-debug WebSocket disconnects) | debug-file | 3 | debug-with-file → (hypothesis iteration) |
| "协作分析: 认证架构优化" (collaborative analysis: auth architecture optimization) | analyze-file | 3 | analyze-with-file → (multi-round discussion) |
| "OAuth2 system" | feature (high) | 3 |【plan → plan-verify】→ execute →【review-session-cycle → review-cycle-fix】→【test-fix-gen → test-cycle-execute】|
| "Implement with TDD" | tdd | 3 |【tdd-plan → execute】→ tdd-verify |
| "Uncertain: real-time arch" | exploration | 4 | brainstorm:auto-parallel →【plan → plan-verify】→ execute →【test-fix-gen → test-cycle-execute】|

---
|
||||
|
||||
## Key Design Principles
|
||||
|
||||
1. **Main Process Execution** - Use SlashCommand in main process, no external CLI
|
||||
2. **Intent-Driven** - Auto-select workflow based on task intent
|
||||
3. **Port-Based Chaining** - Build command chain using port matching
|
||||
4. **Minimum Execution Units** - Commands grouped into atomic units, never split (e.g., lite-plan → lite-execute)
|
||||
5. **Progressive Clarification** - Low clarity triggers clarification phase
|
||||
6. **TODO Tracking** - Use CCW prefix to isolate workflow todos
|
||||
7. **Unit-Aware Error Handling** - Retry/skip/abort affects whole unit, not individual commands
|
||||
8. **User Control** - Optional user confirmation at each phase
|
||||

---

## State Management

**TodoWrite-Based Tracking**: All execution state is tracked via TodoWrite with the `CCW:` prefix.

```javascript
// Initial state
todos = [
  { content: "CCW:rapid: [1/3] /workflow:lite-plan", status: "in_progress" },
  { content: "CCW:rapid: [2/3] /workflow:lite-execute", status: "pending" },
  { content: "CCW:rapid: [3/3] /workflow:test-cycle-execute", status: "pending" }
];

// After command 1 completes
todos = [
  { content: "CCW:rapid: [1/3] /workflow:lite-plan", status: "completed" },
  { content: "CCW:rapid: [2/3] /workflow:lite-execute", status: "in_progress" },
  { content: "CCW:rapid: [3/3] /workflow:test-cycle-execute", status: "pending" }
];
```

**vs ccw-coordinator**: Extensive state.json with task_id, status transitions, and hook callbacks.
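
For contrast, a ccw-coordinator `state.json` might look roughly like this; the exact schema is owned by ccw-coordinator, so every field name below is an illustrative assumption:

```json
{
  "task_id": "chain-20250105-143022",
  "status": "executing",
  "commands": [
    { "cmd": "/workflow:lite-plan", "status": "completed" },
    { "cmd": "/workflow:lite-execute", "status": "running" }
  ],
  "checkpoint": { "index": 1, "resumable": true },
  "hooks": { "on_complete": "notify" }
}
```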

---

## With-File Workflows

**With-File workflows** provide documented exploration with multi-CLI collaboration. They are self-contained and generate comprehensive session artifacts.

| Workflow | Purpose | Key Features | Output Folder |
|----------|---------|--------------|---------------|
| **brainstorm-with-file** | Multi-perspective ideation | Gemini/Codex/Claude perspectives, diverge-converge cycles | `.workflow/.brainstorm/` |
| **debug-with-file** | Hypothesis-driven debugging | Gemini validation, understanding evolution, NDJSON logging | `.workflow/.debug/` |
| **analyze-with-file** | Collaborative analysis | Multi-round Q&A, CLI exploration, documented discussions | `.workflow/.analysis/` |

**Detection Keywords**:
- **brainstorm**: 头脑风暴 (brainstorm), 创意 (creative ideas), 发散思维 (divergent thinking), multi-perspective, compare perspectives
- **debug-file**: 深度调试 (deep debug), 假设验证 (hypothesis validation), systematic debug, hypothesis debug
- **analyze-file**: 协作分析 (collaborative analysis), 深度理解 (deep understanding), collaborative analysis, explore concept
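
A minimal sketch of how such keyword detection could route input to a With-File workflow; the real detector may match more loosely than this exact-substring check:

```javascript
// Keyword lists mirror the detection keywords above; the matching logic is illustrative.
const WITH_FILE_ROUTES = [
  { workflow: 'brainstorm-with-file', keywords: ['头脑风暴', 'multi-perspective', 'compare perspectives'] },
  { workflow: 'debug-with-file',      keywords: ['深度调试', 'systematic debug', 'hypothesis debug'] },
  { workflow: 'analyze-with-file',    keywords: ['协作分析', 'collaborative analysis', 'explore concept'] }
];

function detectWithFileWorkflow(input) {
  for (const route of WITH_FILE_ROUTES) {
    if (route.keywords.some(k => input.includes(k))) return route.workflow;
  }
  return null; // fall through to standard intent analysis
}
```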

**Characteristics**:
1. **Self-Contained**: Each workflow handles its own iteration loop
2. **Documented Process**: Creates evolving documents (brainstorm.md, understanding.md, discussion.md)
3. **Multi-CLI**: Uses Gemini/Codex/Claude for different perspectives
4. **Built-in Post-Completion**: Offers follow-up options (create plan, issue, etc.)

---

## Type Comparison: ccw vs ccw-coordinator

| Aspect | ccw | ccw-coordinator |
|--------|-----|-----------------|
| **Type** | Main process (SlashCommand) | External CLI (ccw cli + hook callbacks) |
| **Execution** | Synchronous blocking | Async background with hook completion |
| **Workflow** | Auto intent-based selection | Manual chain building |
| **Intent Analysis** | 5-phase clarity check | 3-phase requirement analysis |
| **State** | TodoWrite only (in-memory) | state.json + checkpoint/resume |
| **Error Handling** | Retry/skip/abort (interactive) | Retry/skip/abort (via AskUserQuestion) |
| **Use Case** | Auto workflow for any task | Manual orchestration, large chains |

---

## Usage

```bash
# Auto-select workflow
ccw "Add user authentication"

# Complex requirement (triggers clarification)
ccw "Optimize system performance"

# Bug fix
ccw "Fix memory leak in WebSocket handler"

# TDD development
ccw "Implement user registration with TDD"

# Exploratory task
ccw "Uncertain about architecture for real-time notifications"

# With-File workflows (documented exploration with multi-CLI collaboration)
ccw "头脑风暴: 用户通知系统重新设计"                 # "Brainstorm: redesign the user notification system" → brainstorm-with-file
ccw "从头脑风暴 BS-通知系统-2025-01-28 创建 issue"   # "Create an issue from brainstorm BS-通知系统-2025-01-28" → brainstorm-to-issue (bridge)
ccw "深度调试: 系统随机崩溃问题"                     # "Deep debug: random system crashes" → debug-with-file
ccw "协作分析: 理解现有认证架构的设计决策"           # "Collaborative analysis: understand the design decisions behind the existing auth architecture" → analyze-with-file
```

@@ -1,87 +0,0 @@
---
name: analyze
description: Read-only codebase analysis using Gemini (default), Qwen, or Codex with auto-pattern detection and template selection
argument-hint: "[--tool codex|gemini|qwen] [--enhance] analysis target"
allowed-tools: SlashCommand(*), Bash(*), TodoWrite(*), Read(*), Glob(*), Task(*)
---

# CLI Analyze Command (/cli:analyze)

## Purpose

Quick codebase analysis using CLI tools. **Read-only - does NOT modify code**.

**Tool Selection**:
- **gemini** (default) - Best for code analysis
- **qwen** - Fallback when Gemini unavailable
- **codex** - Alternative for deep analysis

## Parameters

- `--tool <gemini|qwen|codex>` - Tool selection (default: gemini)
- `--enhance` - Use `/enhance-prompt` for context-aware enhancement
- `<analysis-target>` - Description of what to analyze

## Tool Usage

**Gemini** (Primary):
```bash
--tool gemini  # or omit (default)
```

**Qwen** (Fallback):
```bash
--tool qwen
```

**Codex** (Alternative):
```bash
--tool codex
```

## Execution Flow

Uses **cli-execution-agent** (default) for automated analysis:

```javascript
Task(
  subagent_type="cli-execution-agent",
  description="Codebase analysis with pattern detection",
  prompt=`
    Task: ${analysis_target}
    Mode: analyze
    Tool: ${tool_flag || 'gemini'}
    Enhance: ${enhance_flag}

    Execute codebase analysis with auto-pattern detection:

    1. Context Discovery:
       - Extract keywords from analysis target
       - Auto-detect file patterns (auth→auth files, component→components, etc.)
       - Discover additional relevant files using MCP
       - Build comprehensive file context

    2. Template Selection:
       - Auto-select analysis template based on keywords
       - Apply appropriate analysis methodology
       - Include @CLAUDE.md for project context

    3. CLI Command Construction:
       - Tool: ${tool_flag || 'gemini'} (qwen fallback, codex for deep analysis)
       - Context: @CLAUDE.md + auto-detected patterns + discovered files
       - Mode: analysis (read-only)
       - Expected: Insights, recommendations, pattern analysis

    4. Execution & Output:
       - Execute CLI tool with assembled context
       - Generate comprehensive analysis report
       - Save to .workflow/WFS-[id]/.chat/analyze-[timestamp].md (or .scratchpad/)
  `
)
```

## Core Rules

- **Read-only**: Analyzes code, does NOT modify files
- **Auto-pattern**: Detects file patterns from keywords (auth→auth files, component→components, API→api/routes, test→test files)
- **Output**: `.workflow/WFS-[id]/.chat/analyze-[timestamp].md` (or `.scratchpad/` if no session)
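
Typical invocations, for reference (the analysis targets are made-up examples):

```bash
# Default tool (gemini), auto-detected context
/cli:analyze "authentication flow and session handling"

# Explicit tool with prompt enhancement
/cli:analyze --tool codex --enhance "why the dashboard component re-renders on every keystroke"
```
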
@@ -1,82 +0,0 @@
---
name: chat
description: Read-only Q&A interaction with Gemini/Qwen/Codex for codebase questions with automatic context inference
argument-hint: "[--tool codex|gemini|qwen] [--enhance] inquiry"
allowed-tools: SlashCommand(*), Bash(*), Task(*)
---

# CLI Chat Command (/cli:chat)

## Purpose

Direct Q&A interaction with CLI tools for codebase analysis. **Read-only - does NOT modify code**.

**Tool Selection**:
- **gemini** (default) - Best for Q&A and explanations
- **qwen** - Fallback when Gemini unavailable
- **codex** - Alternative for technical deep-dives

## Parameters

- `--tool <gemini|qwen|codex>` - Tool selection (default: gemini)
- `--enhance` - Enhance inquiry with `/enhance-prompt`
- `<inquiry>` (Required) - Question or analysis request

## Tool Usage

**Gemini** (Primary):
```bash
--tool gemini  # or omit (default)
```

**Qwen** (Fallback):
```bash
--tool qwen
```

**Codex** (Alternative):
```bash
--tool codex
```

## Execution Flow

Uses **cli-execution-agent** (default) for automated Q&A:

```javascript
Task(
  subagent_type="cli-execution-agent",
  description="Codebase Q&A with intelligent context discovery",
  prompt=`
    Task: ${inquiry}
    Mode: chat
    Tool: ${tool_flag || 'gemini'}
    Enhance: ${enhance_flag}

    Execute codebase Q&A with intelligent context discovery:

    1. Context Discovery:
       - Parse inquiry to identify relevant topics/keywords
       - Discover related files using MCP/ripgrep (prioritize precision)
       - Include @CLAUDE.md + discovered files
       - Validate context relevance to question

    2. CLI Command Construction:
       - Tool: ${tool_flag || 'gemini'} (qwen fallback, codex for deep dives)
       - Context: @CLAUDE.md + discovered file patterns
       - Mode: analysis (read-only)
       - Expected: Clear, accurate answer with code references

    3. Execution & Output:
       - Execute CLI tool with assembled context
       - Validate answer completeness
       - Save to .workflow/WFS-[id]/.chat/chat-[timestamp].md (or .scratchpad/)
  `
)
```

## Core Rules

- **Read-only**: Provides answers, does NOT modify code
- **Context**: `@CLAUDE.md` + inferred or all files (`@**/*`)
- **Output**: `.workflow/WFS-[id]/.chat/chat-[timestamp].md` (or `.scratchpad/` if no session)
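
Typical invocations, for reference (the inquiries are made-up examples):

```bash
# Default tool (gemini)
/cli:chat "How does the session manager persist workflow state?"

# Deep-dive with codex and an enhanced inquiry
/cli:chat --tool codex --enhance "Trace the error-handling path for a failed SlashCommand call"
```
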
@@ -3,6 +3,7 @@ name: cli-init
description: Generate .gemini/ and .qwen/ config directories with settings.json and ignore files based on workspace technology detection
argument-hint: "[--tool gemini|qwen|all] [--output path] [--preview]"
allowed-tools: Bash(*), Read(*), Write(*), Glob(*)
group: cli
---

# CLI Initialization Command (/cli:cli-init)
@@ -191,7 +192,7 @@ target/
### Step 2: Workspace Analysis (MANDATORY FIRST)
```bash
# Analyze workspace structure
bash(~/.claude/scripts/get_modules_by_depth.sh json)
bash(ccw tool exec get_modules_by_depth '{"format":"json"}')
```

### Step 3: Technology Detection
@@ -428,15 +429,6 @@ docker-compose.override.yml
/cli:cli-init --tool all --preview
```

## Key Benefits

- **Automatic Detection**: No manual configuration needed
- **Multi-Tool Support**: Configure Gemini and Qwen simultaneously
- **Technology Aware**: Rules adapted to actual project stack
- **Maintainable**: Clear sections for easy customization
- **Consistent**: Follows gitignore syntax standards
- **Safe**: Creates backups of existing files
- **Flexible**: Initialize specific tools or all at once

## Tool Selection Guide

@@ -1,519 +0,0 @@
---
name: codex-execute
description: Multi-stage Codex execution with automatic task decomposition into grouped subtasks using resume mechanism for context continuity
argument-hint: "[--verify-git] task description or task-id"
allowed-tools: SlashCommand(*), Bash(*), TodoWrite(*), Read(*), Glob(*)
---

# CLI Codex Execute Command (/cli:codex-execute)

## Purpose

Automated task decomposition and sequential execution with Codex, using the `codex exec "..." resume --last` mechanism for continuity between subtasks.

**Input**: User description or task ID (automatically loads from `.task/[ID].json` if applicable)

## Core Workflow

```
Task Input → Analyze Dependencies → Create Task Flow Diagram →
Decompose into Subtask Groups → TodoWrite Tracking →
For Each Subtask Group:
  For First Subtask in Group:
    0. Stage existing changes (git add -A) if valid git repo
    1. Execute with Codex (new session)
    2. [Optional] Git verification
    3. Mark complete in TodoWrite
  For Related Subtasks in Same Group:
    0. Stage changes from previous subtask
    1. Execute with `codex exec "..." resume --last` (continue session)
    2. [Optional] Git verification
    3. Mark complete in TodoWrite
→ Final Summary
```

## Parameters

- `<input>` (Required): Task description or task ID (e.g., "implement auth" or "IMPL-001")
  - If input matches the task ID format, loads from `.task/[ID].json`
  - Otherwise, uses input as the task description
- `--verify-git` (Optional): Verify git status after each subtask completion
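
A minimal sketch of that ID detection; the exact pattern the command accepts is not pinned down here, so this regex is an assumption:

```javascript
// Hypothetical matcher: uppercase prefix, dash, digits (IMPL-001, TASK-123).
const TASK_ID_RE = /^[A-Z]+-\d+$/;

function resolveInput(input) {
  const trimmed = input.trim();
  if (TASK_ID_RE.test(trimmed)) {
    return { kind: 'task-file', path: `.task/${trimmed}.json` }; // load task JSON
  }
  return { kind: 'description', text: trimmed };                 // freeform description
}
```
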
## Execution Flow

### Phase 1: Input Processing & Task Flow Analysis

1. **Parse Input**:
   - Check if input matches the task ID pattern (e.g., `IMPL-001`, `TASK-123`)
   - If yes: Load from `.task/[ID].json` and extract requirements
   - If no: Use input as the task description directly

2. **Analyze Dependencies & Create Task Flow Diagram**:
   - Analyze task complexity and scope
   - Identify dependencies and relationships between subtasks
   - Create a visual task flow diagram showing:
     - Independent task groups (parallel execution possible)
     - Sequential dependencies (must use resume)
     - Branching logic (conditional paths)
   - Display the flow diagram for user review

**Task Flow Diagram Format**:
```
[Group A: Auth Core]
A1: Create user model ──┐
A2: Add validation ─────┤─► [resume] ─► A3: Database schema
                        │
[Group B: API Layer]    │
B1: Auth endpoints ─────┘─► [new session]
B2: Middleware ────────────► [resume] ─► B3: Error handling

[Group C: Testing]
C1: Unit tests ─────────────► [new session]
C2: Integration tests ──────► [resume]
```

**Diagram Symbols**:
- `──►` Sequential dependency (must resume previous session)
- `─┐` Branch point (multiple paths)
- `─┘` Merge point (wait for completion)
- `[resume]` Use `codex exec "..." resume --last`
- `[new session]` Start fresh Codex session

3. **Decompose into Subtask Groups**:
   - Group related subtasks that share context
   - Break down into 3-8 subtasks total
   - Assign each subtask to a group
   - Create a TodoWrite tracker with groups
   - Display the decomposition for user review

**Decomposition Criteria**:
- Each subtask: completable in 5-15 minutes
- Clear, testable outcomes
- Explicit dependencies
- Focused file scope (1-5 files per subtask)
- **Group coherence**: Subtasks in the same group share context/files

### File Discovery for Task Decomposition

Use `rg` or MCP tools to discover relevant files, then group by domain:

**Workflow**: Discover → Analyze scope → Group by files → Create task flow

**Example**:
```bash
# Discover files
rg "authentication" --files-with-matches --type ts

# Group by domain
# Group A: src/auth/model.ts, src/auth/schema.ts
# Group B: src/api/auth.ts, src/middleware/auth.ts
# Group C: tests/auth/*.test.ts

# Each group becomes a session with related subtasks
```

File patterns: see intelligent-tools-strategy.md (loaded in memory)

### Phase 2: Group-Based Execution

**Pre-Execution Git Staging** (if valid git repository):
```bash
# Stage all current changes before codex execution
# This makes codex changes clearly visible in git diff
git add -A
git status --short
```

**For First Subtask in Each Group** (New Session):
```bash
# Start new Codex session for independent task group
codex -C [dir] --full-auto exec "
PURPOSE: [group goal]
TASK: [subtask description - first in group]
CONTEXT: @{relevant_files} @CLAUDE.md
EXPECTED: [specific deliverables]
RULES: [constraints]
Group [X]: [group name] - Subtask 1 of N in this group
" --skip-git-repo-check -s danger-full-access
```

**For Related Subtasks in Same Group** (Resume Session):
```bash
# Stage changes from previous subtask (if valid git repository)
git add -A

# Resume session ONLY for subtasks in same group
codex exec "
CONTINUE IN SAME GROUP:
Group [X]: [group name] - Subtask N of M

PURPOSE: [continuation goal within group]
TASK: [subtask N description]
CONTEXT: Previous work in this group completed, now focus on @{new_relevant_files}
EXPECTED: [specific deliverables]
RULES: Build on previous subtask in group, maintain consistency
" resume --last --skip-git-repo-check -s danger-full-access
```

**For First Subtask in Different Group** (New Session):
```bash
# Stage changes from previous group
git add -A

# Start NEW session for different group (no resume)
codex -C [dir] --full-auto exec "
PURPOSE: [new group goal]
TASK: [subtask description - first in new group]
CONTEXT: @{different_files} @CLAUDE.md
EXPECTED: [specific deliverables]
RULES: [constraints]
Group [Y]: [new group name] - Subtask 1 of N in this group
" --skip-git-repo-check -s danger-full-access
```

**Resume Decision Logic**:
```
if (subtask.group == previous_subtask.group):
    use `codex exec "..." resume --last`   # Continue session
else:
    use `codex -C [dir] exec "..."`        # New session
```

### Phase 3: Verification (if --verify-git enabled)

After each subtask completion:
```bash
# Check git status
git status --short

# Verify expected changes
git diff --stat

# Optional: Check for untracked files that should be committed
git ls-files --others --exclude-standard
```

**Verification Checks**:
- Files modified match subtask scope
- No unexpected changes in unrelated files
- No merge conflicts or errors
- Code compiles/runs (if applicable)

### Phase 4: TodoWrite Tracking with Groups

**Initial Setup with Task Flow**:
```javascript
TodoWrite({
  todos: [
    // Display task flow diagram first
    { content: "Task Flow Analysis Complete - See diagram above", status: "completed", activeForm: "Analyzing task flow" },

    // Group A subtasks (will use resume within group)
    { content: "[Group A] Subtask 1: [description]", status: "in_progress", activeForm: "Executing Group A subtask 1" },
    { content: "[Group A] Subtask 2: [description] [resume]", status: "pending", activeForm: "Executing Group A subtask 2" },

    // Group B subtasks (new session, then resume within group)
    { content: "[Group B] Subtask 1: [description] [new session]", status: "pending", activeForm: "Executing Group B subtask 1" },
    { content: "[Group B] Subtask 2: [description] [resume]", status: "pending", activeForm: "Executing Group B subtask 2" },

    // Group C subtasks (new session)
    { content: "[Group C] Subtask 1: [description] [new session]", status: "pending", activeForm: "Executing Group C subtask 1" },

    { content: "Final verification and summary", status: "pending", activeForm: "Verifying and summarizing" }
  ]
})
```

**After Each Subtask**:
```javascript
TodoWrite({
  todos: [
    { content: "Task Flow Analysis Complete - See diagram above", status: "completed", activeForm: "Analyzing task flow" },
    { content: "[Group A] Subtask 1: [description]", status: "completed", activeForm: "Executing Group A subtask 1" },
    { content: "[Group A] Subtask 2: [description] [resume]", status: "in_progress", activeForm: "Executing Group A subtask 2" },
    // ... update status
  ]
})
```

## Codex Resume Mechanism

**Why Group-Based Resume?**
- **Within Group**: Maintains conversation context for related subtasks
  - Codex remembers previous decisions and patterns
  - Reduces context repetition
  - Ensures consistency in implementation style
- **Between Groups**: Fresh session for independent tasks
  - Avoids context pollution from unrelated work
  - Prevents confusion when switching domains
  - Maintains focused attention on current group

**How It Works**:
1. **First subtask in Group A**: Creates new Codex session
2. **Subsequent subtasks in Group A**: Use `codex resume --last` to continue session
3. **First subtask in Group B**: Creates NEW Codex session (no resume)
4. **Subsequent subtasks in Group B**: Use `codex resume --last` within Group B
5. Each group builds on its own context, isolated from other groups

**When to Resume vs New Session**:
```
RESUME (same group):
- Subtasks share files/modules
- Logical continuation of previous work
- Same architectural domain

NEW SESSION (different group):
- Independent task area
- Different files/modules
- Switching architectural domains
- Testing after implementation
```

**Image Support**:
```bash
# First subtask with design reference
codex -C [dir] -i design.png --full-auto exec "..." --skip-git-repo-check -s danger-full-access

# Resume for next subtask (image context preserved)
codex exec "CONTINUE TO NEXT SUBTASK: ..." resume --last --skip-git-repo-check -s danger-full-access
```

## Error Handling

**Subtask Failure**:
1. Mark subtask as blocked in TodoWrite
2. Report error details to user
3. Pause execution for manual intervention
4. Use AskUserQuestion for recovery decision:

```typescript
AskUserQuestion({
  questions: [{
    question: "Codex execution failed for the subtask. How should the workflow proceed?",
    header: "Recovery",
    options: [
      { label: "Retry Subtask", description: "Attempt to execute the same subtask again." },
      { label: "Skip Subtask", description: "Continue to the next subtask in the plan." },
      { label: "Abort Workflow", description: "Stop the entire execution." }
    ],
    multiSelect: false
  }]
})
```

**Git Verification Failure** (if --verify-git):
1. Show unexpected changes
2. Pause execution
3. Request user decision:
   - Continue anyway
   - Rollback and retry
   - Manual fix

**Codex Session Lost**:
1. Detect if `codex exec "..." resume --last` fails
2. Attempt retry with fresh session (see the sketch below)
3. Report to user if manual intervention needed
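
One way to express that retry-with-fresh-session fallback as shell, using only the flags shown above; `$PROMPT` and `$DIR` are placeholders:

```bash
# If resuming the previous session fails, fall back to a fresh session once.
codex exec "$PROMPT" resume --last --skip-git-repo-check -s danger-full-access \
  || codex -C "$DIR" --full-auto exec "$PROMPT" --skip-git-repo-check -s danger-full-access
```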

## Output Format

**During Execution**:
```
Task Flow Diagram:
[Group A: Auth Core]
A1: Create user model ──┐
A2: Add validation ─────┤─► [resume] ─► A3: Database schema
                        │
[Group B: API Layer]    │
B1: Auth endpoints ─────┘─► [new session]
B2: Middleware ────────────► [resume] ─► B3: Error handling

[Group C: Testing]
C1: Unit tests ─────────────► [new session]
C2: Integration tests ──────► [resume]

Task Decomposition:
[Group A] 1. Create user model
[Group A] 2. Add validation logic [resume]
[Group A] 3. Implement database schema [resume]
[Group B] 4. Create auth endpoints [new session]
[Group B] 5. Add middleware [resume]
[Group B] 6. Error handling [resume]
[Group C] 7. Unit tests [new session]
[Group C] 8. Integration tests [resume]

[Group A] Executing Subtask 1/8: Create user model
Starting new Codex session for Group A...
[Codex output]
Subtask 1 completed

Git Verification:
M src/models/user.ts
Changes verified

[Group A] Executing Subtask 2/8: Add validation logic
Resuming Codex session (same group)...
[Codex output]
Subtask 2 completed

[Group B] Executing Subtask 4/8: Create auth endpoints
Starting NEW Codex session for Group B...
[Codex output]
Subtask 4 completed
...

All Subtasks Completed
Summary: [file references, changes, next steps]
```

**Final Summary**:
```markdown
# Task Execution Summary: [Task Description]

## Subtasks Completed
1. [Subtask 1]: [files modified]
2. [Subtask 2]: [files modified]
...

## Files Modified
- src/file1.ts:10-50 - [changes]
- src/file2.ts - [changes]

## Git Status
- N files modified
- M files added
- No conflicts

## Next Steps
- [Suggested follow-up actions]
```

## Examples

**Example 1: Simple Task with Groups**
```bash
/cli:codex-execute "implement user authentication system"

# Task Flow Diagram:
# [Group A: Data Layer]
#   A1: Create user model ──► [resume] ──► A2: Database schema
#
# [Group B: Auth Logic]
#   B1: JWT token generation ──► [new session]
#   B2: Authentication middleware ──► [resume]
#
# [Group C: API Endpoints]
#   C1: Login/logout endpoints ──► [new session]
#
# [Group D: Testing]
#   D1: Unit tests ──► [new session]
#   D2: Integration tests ──► [resume]

# Execution:
# Group A: A1 (new) → A2 (resume)
# Group B: B1 (new) → B2 (resume)
# Group C: C1 (new)
# Group D: D1 (new) → D2 (resume)
```

**Example 2: With Git Verification**
```bash
/cli:codex-execute --verify-git "refactor API layer to use dependency injection"

# After each subtask, verifies:
# - Only expected files modified
# - No breaking changes in unrelated code
# - Tests still pass
```

**Example 3: With Task ID**
```bash
/cli:codex-execute IMPL-001

# Loads task from .task/IMPL-001.json
# Decomposes based on task requirements
```

## Best Practices

1. **Task Flow First**: Always create a visual flow diagram before execution
2. **Group Related Work**: Cluster subtasks by domain/files for efficient resume
3. **Subtask Granularity**: Keep subtasks small and focused (5-15 min each)
4. **Clear Boundaries**: Each subtask should have well-defined input/output
5. **Git Hygiene**: Use `--verify-git` for critical refactoring
6. **Pre-Execution Staging**: Stage changes before each subtask to clearly see codex modifications
7. **Smart Resume**: Use `resume --last` ONLY within the same group
8. **Fresh Sessions**: Start a new session when switching to a different group/domain
9. **Recovery Points**: TodoWrite with group labels provides clear progress tracking
10. **Image References**: Attach design files for UI tasks (first subtask in group)

## Input Processing

**Automatic Detection**:
- Input matches task ID pattern → Load from `.task/[ID].json`
- Otherwise → Use as task description

**Task JSON Structure** (when loading from file):
```json
{
  "task_id": "IMPL-001",
  "title": "Implement user authentication",
  "description": "Create JWT-based auth system",
  "acceptance_criteria": [...],
  "scope": {...},
  "brainstorming_refs": [...]
}
```

## Output Routing

**Execution Log Destination**:
- **IF** active workflow session exists:
  - Execution log: `.workflow/WFS-[id]/.chat/codex-execute-[timestamp].md`
  - Task summaries: `.workflow/WFS-[id]/.summaries/[TASK-ID]-summary.md` (if task ID)
  - Task updates: `.workflow/WFS-[id]/.task/[TASK-ID].json` status updates
  - TodoWrite tracking: Embedded in execution log
- **ELSE** (no active session):
  - **Recommended**: Create workflow session first (`/workflow:session:start`)
  - **Alternative**: Save to `.workflow/.scratchpad/codex-execute-[description]-[timestamp].md`

**Output Files** (during execution):
```
.workflow/WFS-[session-id]/
├── .chat/
│   └── codex-execute-20250105-143022.md   # Full execution log with task flow
├── .summaries/
│   ├── IMPL-001.1-summary.md              # Subtask summaries
│   ├── IMPL-001.2-summary.md
│   └── IMPL-001-summary.md                # Final task summary
└── .task/
    ├── IMPL-001.json                      # Updated task status
    └── [subtask JSONs if decomposed]
```

**Examples**:
- During session `WFS-auth-system`, executing multi-stage auth implementation:
  - Log: `.workflow/WFS-auth-system/.chat/codex-execute-20250105-143022.md`
  - Summaries: `.workflow/WFS-auth-system/.summaries/IMPL-001.{1,2,3}-summary.md`
  - Task status: `.workflow/WFS-auth-system/.task/IMPL-001.json` (status: completed)
- No session, ad-hoc multi-stage task:
  - Log: `.workflow/.scratchpad/codex-execute-auth-refactor-20250105-143045.md`

**Save Results**:
- Execution log with task flow diagram and TodoWrite tracking
- Individual summaries for each completed subtask
- Final consolidated summary when all subtasks complete
- Modified code files throughout project

## Notes

**vs. `/cli:execute`**:
- `/cli:execute`: Single-shot execution with Gemini/Qwen/Codex
- `/cli:codex-execute`: Multi-stage Codex execution with automatic task decomposition and resume mechanism

**Input Flexibility**: Accepts both freeform descriptions and task IDs (auto-detects and loads JSON)

**Context Window**: `codex exec "..." resume --last` maintains conversation history, ensuring consistency across subtasks without redundant context injection.

**Output Details**:
- Session management: see intelligent-tools-strategy.md
- **⚠️ Code Modification**: This command performs multi-stage code modifications - the execution log tracks all changes
.claude/commands/cli/codex-review.md (new file, 361 lines)
@@ -0,0 +1,361 @@
---
name: codex-review
description: Interactive code review using Codex CLI via ccw endpoint with configurable review target, model, and custom instructions
argument-hint: "[--uncommitted|--base <branch>|--commit <sha>] [--model <model>] [--title <title>] [prompt]"
allowed-tools: Bash(*), AskUserQuestion(*), Read(*)
---

# Codex Review Command (/cli:codex-review)

## Overview
Interactive code review command that invokes `codex review` via the ccw cli endpoint with guided parameter selection.

**Codex Review Parameters** (from `codex review --help`):
| Parameter | Description |
|-----------|-------------|
| `[PROMPT]` | Custom review instructions (positional) |
| `-c model=<model>` | Override model via config |
| `--uncommitted` | Review staged, unstaged, and untracked changes |
| `--base <BRANCH>` | Review changes against base branch |
| `--commit <SHA>` | Review changes introduced by a commit |
| `--title <TITLE>` | Optional commit title for review summary |

## Prompt Template Format

Follow the standard ccw cli prompt template:

```
PURPOSE: [what] + [why] + [success criteria] + [constraints/scope]
TASK: • [step 1] • [step 2] • [step 3]
MODE: review
CONTEXT: [review target description] | Memory: [relevant context]
EXPECTED: [deliverable format] + [quality criteria]
CONSTRAINTS: [focus constraints]
```

## EXECUTION INSTRUCTIONS - START HERE

**When this command is triggered, follow these exact steps:**

### Step 1: Parse Arguments

Check if the user provided arguments directly:
- `--uncommitted` → Record target = uncommitted
- `--base <branch>` → Record target = base, branch name
- `--commit <sha>` → Record target = commit, sha value
- `--model <model>` → Record model selection
- `--title <title>` → Record title
- Remaining text → Use as custom focus/prompt

If no target specified → Continue to Step 2 for interactive selection.
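
A minimal sketch of this argument parsing in shell; illustrative only, not the shipped parser:

```bash
# Parse codex-review flags; anything unrecognized becomes the custom focus/prompt.
TARGET=""; BRANCH=""; SHA=""; MODEL=""; TITLE=""; PROMPT=""
while [ $# -gt 0 ]; do
  case "$1" in
    --uncommitted) TARGET="uncommitted" ;;
    --base)        TARGET="base";   BRANCH="$2"; shift ;;
    --commit)      TARGET="commit"; SHA="$2";    shift ;;
    --model)       MODEL="$2"; shift ;;
    --title)       TITLE="$2"; shift ;;
    *)             PROMPT="$PROMPT $1" ;;
  esac
  shift
done
```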

### Step 2: Interactive Parameter Selection

**2.1 Review Target Selection**

```javascript
AskUserQuestion({
  questions: [{
    question: "What do you want to review?",
    header: "Review Target",
    options: [
      { label: "Uncommitted changes (Recommended)", description: "Review staged, unstaged, and untracked changes" },
      { label: "Compare to branch", description: "Review changes against a base branch (e.g., main)" },
      { label: "Specific commit", description: "Review changes introduced by a specific commit" }
    ],
    multiSelect: false
  }]
})
```

**2.2 Branch/Commit Input (if needed)**

If "Compare to branch" selected:
```javascript
AskUserQuestion({
  questions: [{
    question: "Which base branch to compare against?",
    header: "Base Branch",
    options: [
      { label: "main", description: "Compare against main branch" },
      { label: "master", description: "Compare against master branch" },
      { label: "develop", description: "Compare against develop branch" }
    ],
    multiSelect: false
  }]
})
```

If "Specific commit" selected:
- Run `git log --oneline -10` to show recent commits
- Ask user to provide commit SHA or select from list

**2.3 Model Selection (Optional)**

```javascript
AskUserQuestion({
  questions: [{
    question: "Which model to use for review?",
    header: "Model",
    options: [
      { label: "Default", description: "Use codex default model (gpt-5.2)" },
      { label: "o3", description: "OpenAI o3 reasoning model" },
      { label: "gpt-4.1", description: "GPT-4.1 model" },
      { label: "o4-mini", description: "OpenAI o4-mini (faster)" }
    ],
    multiSelect: false
  }]
})
```

**2.4 Review Focus Selection**

```javascript
AskUserQuestion({
  questions: [{
    question: "What should the review focus on?",
    header: "Focus Area",
    options: [
      { label: "General review (Recommended)", description: "Comprehensive review: correctness, style, bugs, docs" },
      { label: "Security focus", description: "Security vulnerabilities, input validation, auth issues" },
      { label: "Performance focus", description: "Performance bottlenecks, complexity, resource usage" },
      { label: "Code quality", description: "Readability, maintainability, SOLID principles" }
    ],
    multiSelect: false
  }]
})
```

### Step 3: Build Prompt and Command

**3.1 Construct Prompt Based on Focus**

**General Review Prompt:**
```
PURPOSE: Comprehensive code review to identify issues, improve quality, and ensure best practices; success = actionable feedback with clear priorities
TASK: • Review code correctness and logic errors • Check coding standards and consistency • Identify potential bugs and edge cases • Evaluate documentation completeness
MODE: review
CONTEXT: {target_description} | Memory: Project conventions from CLAUDE.md
EXPECTED: Structured review report with: severity levels (Critical/High/Medium/Low), file:line references, specific improvement suggestions, priority ranking
CONSTRAINTS: Focus on actionable feedback
```

**Security Focus Prompt:**
```
PURPOSE: Security-focused code review to identify vulnerabilities and security risks; success = all security issues documented with remediation
TASK: • Scan for injection vulnerabilities (SQL, XSS, command) • Check authentication and authorization logic • Evaluate input validation and sanitization • Identify sensitive data exposure risks
MODE: review
CONTEXT: {target_description} | Memory: Security best practices, OWASP Top 10
EXPECTED: Security report with: vulnerability classification, CVE references where applicable, remediation code snippets, risk severity matrix
CONSTRAINTS: Security-first analysis | Flag all potential vulnerabilities
```

**Performance Focus Prompt:**
```
PURPOSE: Performance-focused code review to identify bottlenecks and optimization opportunities; success = measurable improvement recommendations
TASK: • Analyze algorithmic complexity (Big-O) • Identify memory allocation issues • Check for N+1 queries and blocking operations • Evaluate caching opportunities
MODE: review
CONTEXT: {target_description} | Memory: Performance patterns and anti-patterns
EXPECTED: Performance report with: complexity analysis, bottleneck identification, optimization suggestions with expected impact, benchmark recommendations
CONSTRAINTS: Performance optimization focus
```

**Code Quality Focus Prompt:**
```
PURPOSE: Code quality review to improve maintainability and readability; success = cleaner, more maintainable code
TASK: • Assess SOLID principles adherence • Identify code duplication and abstraction opportunities • Review naming conventions and clarity • Evaluate test coverage implications
MODE: review
CONTEXT: {target_description} | Memory: Project coding standards
EXPECTED: Quality report with: principle violations, refactoring suggestions, naming improvements, maintainability score
CONSTRAINTS: Code quality and maintainability focus
```

**3.2 Build Target Description**

Based on selection, set `{target_description}`:
- Uncommitted: `Reviewing uncommitted changes (staged + unstaged + untracked)`
- Base branch: `Reviewing changes against {branch} branch`
- Commit: `Reviewing changes introduced by commit {sha}`

### Step 4: Execute via CCW CLI

Build and execute the ccw cli command:

```bash
# Base structure
ccw cli -p "<PROMPT>" --tool codex --mode review [OPTIONS]
```

**Command Construction:**

```bash
# Variables from user selection
TARGET_FLAG=""   # --uncommitted | --base <branch> | --commit <sha>
MODEL_FLAG=""    # --model <model> (if not default)
TITLE_FLAG=""    # --title "<title>" (if provided)

# Build target flag
if [ "$target" = "uncommitted" ]; then
  TARGET_FLAG="--uncommitted"
elif [ "$target" = "base" ]; then
  TARGET_FLAG="--base $branch"
elif [ "$target" = "commit" ]; then
  TARGET_FLAG="--commit $sha"
fi

# Build model flag (only if not default)
if [ "$model" != "default" ] && [ -n "$model" ]; then
  MODEL_FLAG="--model $model"
fi

# Build title flag (if provided)
if [ -n "$title" ]; then
  TITLE_FLAG="--title \"$title\""
fi

# Execute
ccw cli -p "$PROMPT" --tool codex --mode review $TARGET_FLAG $MODEL_FLAG $TITLE_FLAG
```

**Full Example Commands:**

**Option 1: With custom prompt (reviews uncommitted by default):**
```bash
ccw cli -p "
PURPOSE: Comprehensive code review to identify issues and improve quality; success = actionable feedback with priorities
TASK: • Review correctness and logic • Check standards compliance • Identify bugs and edge cases • Evaluate documentation
MODE: review
CONTEXT: Reviewing uncommitted changes | Memory: Project conventions
EXPECTED: Structured report with severity levels, file:line refs, improvement suggestions
CONSTRAINTS: Actionable feedback
" --tool codex --mode review --rule analysis-review-code-quality
```

**Option 2: Target flag only (no prompt allowed):**
```bash
ccw cli --tool codex --mode review --uncommitted
```

### Step 5: Execute and Display Results

```javascript
Bash({
  command: "ccw cli -p \"$PROMPT\" --tool codex --mode review $FLAGS",
  run_in_background: true
})
```

Wait for completion and display formatted results.

## Quick Usage Examples

### Direct Execution (No Interaction)

```bash
# Review uncommitted changes with default settings
/cli:codex-review --uncommitted

# Review against main branch
/cli:codex-review --base main

# Review specific commit
/cli:codex-review --commit abc123

# Review with custom model
/cli:codex-review --uncommitted --model o3

# Review with security focus
/cli:codex-review --uncommitted security

# Full options
/cli:codex-review --base main --model o3 --title "Auth Feature" security
```

### Interactive Mode

```bash
# Start interactive selection (guided flow)
/cli:codex-review
```

## Focus Area Mapping

| User Selection | Prompt Focus | Key Checks |
|----------------|--------------|------------|
| General review | Comprehensive | Correctness, style, bugs, docs |
| Security focus | Security-first | Injection, auth, validation, exposure |
| Performance focus | Optimization | Complexity, memory, queries, caching |
| Code quality | Maintainability | SOLID, duplication, naming, tests |

## Error Handling

### No Changes to Review
```
No changes found for review target. Suggestions:
- For --uncommitted: Make some code changes first
- For --base: Ensure branch exists and has diverged
- For --commit: Verify commit SHA exists
```

### Invalid Branch
```bash
# Show available branches
git branch -a --list | head -20
```

### Invalid Commit
```bash
# Show recent commits
git log --oneline -10
```

## Integration Notes

- Uses `ccw cli --tool codex --mode review` endpoint
- Model passed via prompt (codex uses `-c model=` internally)
- Target flags (`--uncommitted`, `--base`, `--commit`) passed through to codex
- Prompt follows standard ccw cli template format for consistency

## Validation Constraints

**IMPORTANT: Target flags and prompt are mutually exclusive**

The codex CLI has a constraint where target flags (`--uncommitted`, `--base`, `--commit`) cannot be used with a positional `[PROMPT]` argument:

```
error: the argument '--uncommitted' cannot be used with '[PROMPT]'
error: the argument '--base <BRANCH>' cannot be used with '[PROMPT]'
error: the argument '--commit <SHA>' cannot be used with '[PROMPT]'
```

**Behavior:**
- When ANY target flag is specified, ccw cli automatically skips template concatenation (systemRules/roles)
- The review uses codex's default review behavior for the specified target
- Custom prompts are only supported WITHOUT target flags (reviews uncommitted changes by default)

**Valid combinations:**
| Command | Result |
|---------|--------|
| `codex review "Focus on security"` | ✓ Custom prompt, reviews uncommitted (default) |
| `codex review --uncommitted` | ✓ No prompt, uses default review |
| `codex review --base main` | ✓ No prompt, uses default review |
| `codex review --commit abc123` | ✓ No prompt, uses default review |
| `codex review --uncommitted "prompt"` | ✗ Invalid - mutually exclusive |
| `codex review --base main "prompt"` | ✗ Invalid - mutually exclusive |
| `codex review --commit abc123 "prompt"` | ✗ Invalid - mutually exclusive |

**Examples:**
```bash
# ✓ Valid: prompt only (reviews uncommitted by default)
ccw cli -p "Focus on security" --tool codex --mode review

# ✓ Valid: target flag only (no prompt)
ccw cli --tool codex --mode review --uncommitted
ccw cli --tool codex --mode review --base main
ccw cli --tool codex --mode review --commit abc123

# ✗ Invalid: target flag with prompt (will fail)
ccw cli -p "Review this" --tool codex --mode review --uncommitted
ccw cli -p "Review this" --tool codex --mode review --base main
ccw cli -p "Review this" --tool codex --mode review --commit abc123
```

@@ -1,320 +0,0 @@
---
name: discuss-plan
description: Multi-round collaborative planning using Gemini, Codex, and Claude synthesis with iterative discussion cycles (read-only, no code changes)
argument-hint: "[--topic '...'] [--task-id '...'] [--rounds N]"
allowed-tools: SlashCommand(*), Bash(*), TodoWrite(*), Read(*), Glob(*)
---

# CLI Discuss-Plan Command (/cli:discuss-plan)

## Purpose

Orchestrates a multi-model collaborative discussion for in-depth planning and problem analysis. This command facilitates an iterative dialogue between Gemini, Codex, and Claude (the orchestrating AI) to explore a topic from multiple perspectives, refine ideas, and build a robust plan.

**This command is for discussion and planning ONLY. It does NOT modify any code.**

## Core Workflow: The Discussion Loop

The command operates in iterative rounds, allowing the plan to evolve with each cycle. The user can choose to continue for more rounds or conclude when consensus is reached.

```
Topic Input → [Round 1: Gemini → Codex → Claude] → [User Review] →
[Round 2: Gemini → Codex → Claude] → ... → Final Plan
```

### Model Roles & Priority

**Priority Order**: Gemini > Codex > Claude

1. **Gemini (The Analyst)** - Priority 1
   - Kicks off each round with deep analysis
   - Provides foundational ideas and draft plans
   - Analyzes current context or previous synthesis

2. **Codex (The Architect/Critic)** - Priority 2
   - Reviews Gemini's output critically
   - Uses deep reasoning for technical trade-offs
   - Proposes alternative strategies
   - **Participates purely in conversational/reasoning capacity**
   - Uses resume mechanism to maintain discussion context

3. **Claude (The Synthesizer/Moderator)** - Priority 3
   - Synthesizes discussion from Gemini and Codex
   - Highlights agreements and contentions
   - Structures refined plan
   - Poses key questions for next round

## Parameters

- `<input>` (Required): Topic description or task ID (e.g., "Design a new caching layer" or `PLAN-002`)
- `--rounds <N>` (Optional): Maximum number of discussion rounds (default: prompts after each round)
- `--task-id <id>` (Optional): Associates discussion with workflow task ID
- `--topic <description>` (Optional): High-level topic for discussion

## Execution Flow

### Phase 1: Initial Setup

1. **Input Processing**: Parse topic or task ID
2. **Context Gathering**: Identify relevant files based on topic

### Phase 2: Discussion Round

Each round consists of three sequential steps, tracked via `TodoWrite`.

**Step 1: Gemini's Analysis (Priority 1)**

Gemini analyzes the topic and proposes a preliminary plan.

```bash
# Round 1: CONTEXT_INPUT is the initial topic
# Subsequent rounds: CONTEXT_INPUT is the synthesis from previous round
gemini -p "
PURPOSE: Analyze and propose a plan for '[topic]'
TASK: Provide initial analysis, identify key modules, and draft implementation plan
MODE: analysis
CONTEXT: @CLAUDE.md [auto-detected files]
INPUT: [CONTEXT_INPUT]
EXPECTED: Structured analysis and draft plan for discussion
RULES: Focus on technical depth and practical considerations
"
```

**Step 2: Codex's Critique (Priority 2)**

Codex reviews Gemini's output using conversational reasoning. Uses `resume --last` to maintain context across rounds.

```bash
# First round (new session)
codex --full-auto exec "
PURPOSE: Critically review technical plan
TASK: Review the provided plan, identify weaknesses, suggest alternatives, reason about trade-offs
MODE: analysis
CONTEXT: @CLAUDE.md [relevant files]
INPUT_PLAN: [Output from Gemini's analysis]
EXPECTED: Critical review with alternative ideas and risk analysis
RULES: Focus on architectural soundness and implementation feasibility
" --skip-git-repo-check

# Subsequent rounds (resume discussion)
codex --full-auto exec "
PURPOSE: Re-evaluate plan based on latest synthesis
TASK: Review updated plan and discussion points, provide further critique or refined ideas
MODE: analysis
CONTEXT: Previous discussion context (maintained via resume)
INPUT_PLAN: [Output from Gemini's analysis for current round]
EXPECTED: Updated critique building on previous discussion
RULES: Build on previous insights, avoid repeating points
" resume --last --skip-git-repo-check
```

**Step 3: Claude's Synthesis (Priority 3)**

Claude (the orchestrating AI) synthesizes both outputs:

- Summarizes Gemini's proposal and Codex's critique
- Highlights agreements and disagreements
- Structures a consolidated plan
- Presents open questions for the next round
- This synthesis becomes the input for the next round

### Phase 3: User Review and Iteration

1. **Present Synthesis**: Show synthesized plan and key discussion points
2. **Continue or Conclude**: Use AskUserQuestion to prompt user:

```typescript
AskUserQuestion({
  questions: [{
    question: "Round of discussion complete. What is the next step?",
    header: "Next Round",
    options: [
      { label: "Start another round", description: "Continue the discussion to refine the plan further." },
      { label: "Conclude and finalize", description: "End the discussion and save the final plan." }
    ],
    multiSelect: false
  }]
})
```

3. **Loop or Finalize**:
   - Continue → New round with Gemini analyzing latest synthesis
   - Conclude → Save final synthesized document

## TodoWrite Tracking

Progress is tracked for each round and model.

```javascript
// Example for 2-round discussion
TodoWrite({
  todos: [
    // Round 1
    { content: "[Round 1] Gemini: Analyzing topic", status: "completed", activeForm: "Analyzing with Gemini" },
    { content: "[Round 1] Codex: Critiquing plan", status: "completed", activeForm: "Critiquing with Codex" },
    { content: "[Round 1] Claude: Synthesizing discussion", status: "completed", activeForm: "Synthesizing discussion" },
    { content: "[User Action] Review Round 1 and decide next step", status: "in_progress", activeForm: "Awaiting user decision" },

    // Round 2
    { content: "[Round 2] Gemini: Analyzing refined plan", status: "pending", activeForm: "Analyzing refined plan" },
    { content: "[Round 2] Codex: Re-evaluating plan [resume]", status: "pending", activeForm: "Re-evaluating with Codex" },
    { content: "[Round 2] Claude: Finalizing plan", status: "pending", activeForm: "Finalizing plan" },
    { content: "Discussion complete - Final plan generated", status: "pending", activeForm: "Generating final document" }
  ]
})
```

## Output Routing

- **Primary Log**: Entire multi-round discussion logged to a single file:
  - `.workflow/WFS-[id]/.chat/discuss-plan-[topic]-[timestamp].md`
- **Final Plan**: Clean final version saved upon conclusion:
  - `.workflow/WFS-[id]/.summaries/plan-[topic].md`
- **Scratchpad**: If no session active:
  - `.workflow/.scratchpad/discuss-plan-[topic]-[timestamp].md`

## Discussion Structure

Each round's output is structured as:

```markdown
## Round N: [Topic]

### Gemini's Analysis (Priority 1)
[Gemini's full analysis and proposal]

### Codex's Critique (Priority 2)
[Codex's critical review and alternatives]

### Claude's Synthesis (Priority 3)
**Points of Agreement:**
- [Agreement 1]
- [Agreement 2]

**Points of Contention:**
- [Issue 1]: Gemini suggests X, Codex suggests Y
- [Issue 2]: Trade-off between A and B

**Consolidated Plan:**
[Structured plan incorporating both perspectives]

**Open Questions for Next Round:**
1. [Question 1]
2. [Question 2]
```

## Examples
|
||||
|
||||
### Example 1: Multi-Round Architecture Discussion
|
||||
|
||||
**Command**: `/cli:discuss-plan --topic "Design a real-time notification system"`
|
||||
|
||||
**Round 1**:
|
||||
1. **Gemini**: Proposes WebSocket-based architecture with RabbitMQ message queue
|
||||
2. **Codex**: Critiques as overly complex for MVP. Suggests Server-Sent Events (SSE) for simplicity (one-way notifications). Questions RabbitMQ necessity, proposes simpler Redis Pub/Sub
|
||||
3. **Claude**: Synthesizes views:
|
||||
- **Plan A (Gemini)**: WebSockets + RabbitMQ (highly scalable, complex)
|
||||
- **Plan B (Codex)**: SSE + Redis (simpler, less overhead)
|
||||
- **Open Question**: Is bi-directional communication critical, or is simplicity priority?
|
||||
4. **User Action**: Opts for another round to explore trade-offs
|
||||
|
||||
**Round 2**:
|
||||
1. **Gemini**: Analyzes synthesized document. Notes that if features like "user is typing" indicators are roadmapped, WebSockets better long-term. Drafts plan starting with SSE/Redis but designing for easy migration
|
||||
2. **Codex**: Reviews migration plan. Reasons that migration itself could be complex. If feature set likely to expand, starting with WebSockets using managed service might be best cost/benefit
|
||||
3. **Claude**: Synthesizes new discussion:
|
||||
- **Consensus**: Simple SSE/Redis too short-sighted
|
||||
- **Refined Options**:
|
||||
1. Phased approach (SSE → WebSocket) with clear migration plan
|
||||
2. Direct WebSocket with managed service (Pusher, Ably) to reduce ops overhead
|
||||
- **Recommendation**: Option 2 most robust and future-proof
|
||||
4. **User Action**: Agrees with recommendation, concludes discussion
|
||||
|
||||
**Final Output**: Planning document saved with:
|
||||
- Chosen architecture (Managed WebSocket service)
|
||||
- Multi-round reasoning
|
||||
- High-level implementation steps
|
||||
|
||||
### Example 2: Feature Design Discussion

**Command**: `/cli:discuss-plan --topic "Design user permission system" --rounds 2`

**Round 1**:
1. **Gemini**: Proposes RBAC (Role-Based Access Control) with predefined roles
2. **Codex**: Suggests ABAC (Attribute-Based Access Control) for more flexibility
3. **Claude**: Synthesizes the trade-offs between simplicity (RBAC) and flexibility (ABAC)

**Round 2**:
1. **Gemini**: Analyzes a hybrid approach - RBAC for core permissions, attributes for fine-grained control
2. **Codex**: Reviews the hybrid model, identifies implementation challenges
3. **Claude**: Final plan with a phased rollout strategy

**Automatic Conclusion**: The command concludes after 2 rounds as specified

### Example 3: Problem-Solving Discussion

**Command**: `/cli:discuss-plan --topic "Debug memory leak in data pipeline" --task-id ISSUE-042`

**Round 1**:
1. **Gemini**: Identifies potential leak sources (unclosed handles, growing cache, event listeners)
2. **Codex**: Adds profiling tool recommendations, suggests memory monitoring
3. **Claude**: Structures a debugging plan with a phased approach

**User Decision**: A single round is sufficient; concludes with the debugging strategy

## Consensus Mechanisms

**When to Continue:**
- Significant disagreement between models
- Open questions requiring deeper analysis
- Trade-offs needing more exploration
- User wants additional perspectives

**When to Conclude:**
- Models converge on a solution
- All key questions addressed
- User satisfied with plan depth
- Maximum rounds reached (if specified)

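Expressed as code, the stop condition amounts to a simple predicate. The following is a minimal sketch, not the command's actual implementation; `converged`, `openQuestions`, and `userSatisfied` are assumed inputs.

```javascript
// Illustrative sketch: deciding whether the discussion loop should stop.
// All inputs are assumptions for illustration, not real command state.
function shouldConclude({ round, maxRounds, converged, openQuestions, userSatisfied }) {
  if (maxRounds && round >= maxRounds) return true;          // --rounds limit reached
  if (converged && openQuestions.length === 0) return true;  // models agree, nothing open
  return Boolean(userSatisfied);                             // user accepts current plan depth
}
```
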
## Comparison with Other Commands

| Command | Models | Rounds | Discussion | Implementation | Use Case |
|---------|--------|--------|------------|----------------|----------|
| `/cli:mode:plan` | Gemini | 1 | NO | NO | Single-model planning |
| `/cli:analyze` | Gemini/Qwen | 1 | NO | NO | Code analysis |
| `/cli:execute` | Any | 1 | NO | YES | Direct implementation |
| `/cli:codex-execute` | Codex | 1 | NO | YES | Multi-stage implementation |
| `/cli:discuss-plan` | **Gemini+Codex+Claude** | **Multiple** | **YES** | **NO** | **Multi-perspective planning** |

## Best Practices

1. **Use for Complex Decisions**: Ideal for architectural decisions, design trade-offs, and problem-solving
2. **Start with a Broad Topic**: Let the first round establish scope; subsequent rounds refine
3. **Review Each Synthesis**: Claude's synthesis is the key decision point - review it carefully
4. **Know When to Stop**: Don't over-iterate - 2-3 rounds are usually sufficient
5. **Task Association**: Use `--task-id` for traceability in the workflow
6. **Save Intermediate Results**: Each round's synthesis is saved automatically
7. **Let Models Disagree**: Divergent views often reveal important trade-offs
8. **Focus Questions**: Use Claude's open questions to guide the next round

## Breaking Discussion Loops

**Detecting Loops:**
- Models repeating the same arguments
- No new insights emerging
- Trade-offs already well understood

**Breaking Strategies:**
1. **User Decision**: Make an executive decision once enough information is gathered
2. **Timeboxing**: Set max rounds upfront with `--rounds`
3. **Criteria-Based**: Define decision criteria before starting
4. **Hybrid Approach**: Accept multiple valid solutions in the final plan

## Notes

- **Pure Discussion**: This command NEVER modifies code - it only produces planning documents
- **Codex Role**: Codex participates as a reasoning/critique tool, not an executor
- **Resume Context**: Codex maintains discussion context via `resume --last`
- **Priority System**: Ensures Gemini leads analysis, Codex provides critique, and Claude synthesizes
- **Output Quality**: Multi-perspective discussion produces more robust plans than single-model analysis
- Command patterns and session management: see intelligent-tools-strategy.md (loaded in memory)
- For implementation after discussion, use `/cli:execute` or `/cli:codex-execute` separately

---
name: execute
description: Autonomous code implementation with YOLO auto-approval using Gemini/Qwen/Codex, supports task ID or description input with automatic file pattern detection
argument-hint: "[--tool codex|gemini|qwen] [--enhance] description or task-id"
allowed-tools: SlashCommand(*), Bash(*), Task(*)
---

# CLI Execute Command (/cli:execute)

## Purpose

Execute implementation tasks with **YOLO permissions** (auto-approves all confirmations). **MODIFIES CODE**.

**Intent**: Autonomous code implementation, modification, and generation
**Supported Tools**: codex, gemini (default), qwen
**Key Feature**: Automatic context inference and file pattern detection

## Core Behavior

1. **Code Modification**: This command MODIFIES, CREATES, and DELETES code files
2. **Auto-Approval**: YOLO mode bypasses confirmation prompts for all operations
3. **Implementation Focus**: Executes actual code changes, not just recommendations
4. **Requires Explicit Intent**: Use only when implementation is intended

## Core Concepts

### YOLO Permissions
Auto-approves: file pattern inference, execution, **file modifications**, summary generation

**WARNING**: This command will make actual code changes without manual confirmation

### Execution Modes

**1. Description Mode** (supports `--enhance`):
- Input: Natural language description
- Process: [Optional: Enhance] → Keyword analysis → Pattern inference → Execute

**2. Task ID Mode** (no `--enhance`):
- Input: Workflow task identifier (e.g., `IMPL-001`)
- Process: Task JSON parsing → Scope analysis → Execute

**3. Agent Mode** (default):
- Input: Description or task-id
- Process: 5-Phase Workflow → Context Discovery → Optimal Tool Selection → Execute

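The mode split above hinges on whether the input looks like a workflow task identifier. A minimal sketch of that dispatch, assuming task IDs follow the `IMPL-001` style shown above (the function and its shape are illustrative, not the command's actual code):

```javascript
// Hypothetical sketch: route input to Task ID Mode or Description Mode.
function resolveMode(input, { enhance = false } = {}) {
  const trimmed = input.trim();
  const isTaskId = /^[A-Z]+-\d+$/.test(trimmed);  // e.g. IMPL-001
  if (isTaskId) {
    // --enhance applies to Description Mode only
    return { mode: 'task-id', taskFile: `.task/${trimmed}.json` };
  }
  return { mode: 'description', enhance };
}
```
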
### Context Inference

Auto-selects files based on keywords and technology (each @ references one pattern):
- "auth" → `@**/*auth* @**/*user*`
- "React" → `@src/**/*.jsx @src/**/*.tsx`
- "api" → `@**/api/**/* @**/routes/**/*`
- Always includes: `@CLAUDE.md @**/*CLAUDE.md`

For precise file targeting, use `rg` or MCP tools to discover files first.

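A minimal sketch of this keyword-to-pattern inference; the mapping mirrors the examples above, and the function itself is an illustrative assumption, not the command's implementation:

```javascript
// Illustrative keyword → @-pattern inference (entries taken from the list above).
const PATTERN_MAP = {
  auth: ['@**/*auth*', '@**/*user*'],
  react: ['@src/**/*.jsx', '@src/**/*.tsx'],
  api: ['@**/api/**/*', '@**/routes/**/*'],
};

function inferContextPatterns(description) {
  const patterns = new Set(['@CLAUDE.md', '@**/*CLAUDE.md']); // always included
  const text = description.toLowerCase();
  for (const [keyword, globs] of Object.entries(PATTERN_MAP)) {
    if (text.includes(keyword)) globs.forEach(g => patterns.add(g));
  }
  return [...patterns];
}
```
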
### Codex Session Continuity

**Resume Pattern** for related tasks:
```bash
# First task - establish session
codex -C [dir] --full-auto exec "[task]" --skip-git-repo-check -s danger-full-access

# Related task - continue session
codex --full-auto exec "[related-task]" resume --last --skip-git-repo-check -s danger-full-access
```

Use `resume --last` when the current task extends or relates to the previous execution. See intelligent-tools-strategy.md for auto-resume rules.

## Parameters

- `--tool <codex|gemini|qwen>` - Select CLI tool (default: auto-select by agent based on complexity)
- `--enhance` - Enhance input with `/enhance-prompt` first (Description Mode only)
- `<description|task-id>` - Natural language description or task identifier
- `--debug` - Verbose logging
- `--save-session` - Save execution to workflow session

## Workflow Integration

**Session Management**: Auto-detects the `.workflow/.active-*` marker
- Active session: Save to `.workflow/WFS-[id]/.chat/execute-[timestamp].md`
- No session: Create a new session or save to the scratchpad

**Task Integration**: Load from `.task/[TASK-ID].json`, update status, generate summary

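A sketch of that session auto-detection; only the paths above come from this document, and the assumption that the marker filename suffix carries the session ID is illustrative:

```javascript
// Hypothetical sketch: resolve the execution log path from the active-session marker.
function resolveExecuteLogPath(timestamp) {
  const marker = Bash(`ls .workflow/.active-* 2>/dev/null | head -1`).trim();
  if (marker) {
    // Assumed: the marker filename suffix is the session ID.
    const sessionId = marker.replace(/^.*\.active-/, '');
    return `.workflow/WFS-${sessionId}/.chat/execute-${timestamp}.md`;
  }
  return `.workflow/.scratchpad/execute-${timestamp}.md`; // no session → scratchpad
}
```
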
## Execution Flow

Uses **cli-execution-agent** (default) for automated implementation:

```javascript
Task(
  subagent_type="cli-execution-agent",
  description="Autonomous code implementation with YOLO auto-approval",
  prompt=`
    Task: ${description_or_task_id}
    Mode: execute
    Tool: ${tool_flag || 'auto-select'}
    Enhance: ${enhance_flag}
    Task-ID: ${task_id}

    Execute autonomous code implementation with full modification permissions:

    1. Task Analysis:
       ${task_id ? '- Load task spec from .task/' + task_id + '.json' : ''}
       - Parse requirements and implementation scope
       - Classify complexity (simple/medium/complex)
       - Extract keywords for context discovery

    2. Context Discovery:
       - Discover implementation files using MCP/ripgrep
       - Identify existing patterns and conventions (CLAUDE.md)
       - Map dependencies and integration points
       - Gather related tests and documentation
       - Auto-detect file patterns from keywords

    3. Tool Selection & Execution:
       - Complexity assessment:
         * Simple/Medium → Gemini/Qwen (MODE=write, --approval-mode yolo)
         * Complex → Codex (MODE=auto, --skip-git-repo-check -s danger-full-access)
       - Tool preference: ${tool_flag || 'auto-select based on complexity'}
       - Apply appropriate implementation template
       - Execute with YOLO auto-approval (bypasses all confirmations)

    4. Implementation:
       - Modify/create/delete code files per requirements
       - Follow existing code patterns and conventions
       - Include comprehensive context in CLI command
       - Ensure working implementation with proper error handling

    5. Output & Documentation:
       - Save execution log: .workflow/WFS-[id]/.chat/execute-[timestamp].md
       ${task_id ? '- Generate task summary: .workflow/WFS-[id]/.summaries/' + task_id + '-summary.md' : ''}
       ${task_id ? '- Update task status in .task/' + task_id + '.json' : ''}
       - Document all code changes made

    ⚠️ YOLO Mode: All file operations auto-approved without confirmation
  `
)
```

**Output**: `.workflow/WFS-[id]/.chat/execute-[timestamp].md` + `.summaries/[TASK-ID]-summary.md` (or `.scratchpad/` if no session)

## Examples

**Basic Implementation** (modifies code):
```bash
/cli:execute "implement JWT authentication with middleware"
# Agent Phase 1: Classifies intent=execute, complexity=medium, keywords=['jwt', 'auth', 'middleware']
# Agent Phase 2: Discovers auth patterns, existing middleware structure
# Agent Phase 3: Selects Gemini (medium complexity)
# Agent Phase 4: Executes with auto-approval
# Result: NEW/MODIFIED code files with JWT implementation
```

**Complex Implementation** (modifies code):
```bash
/cli:execute "implement OAuth2 authentication with token refresh"
# Agent Phase 1: Classifies intent=execute, complexity=complex, keywords=['oauth2', 'auth', 'token', 'refresh']
# Agent Phase 2: MCP discovers auth patterns, existing middleware, JWT dependencies
# Agent Phase 3: Enhances prompt with discovered patterns and best practices
# Agent Phase 4: Selects Codex (complex task), executes with comprehensive context
# Agent Phase 5: Saves execution log + generates implementation summary
# Result: Complete OAuth2 implementation + detailed execution log
```

**Enhanced Implementation** (modifies code):
```bash
/cli:execute --enhance "implement JWT authentication"
# Step 1: Enhance to expand requirements
# Step 2: Execute implementation with auto-approval
# Result: Complete auth system with MODIFIED code files
```

**Task Execution** (modifies code):
```bash
/cli:execute IMPL-001
# Reads: .task/IMPL-001.json for requirements
# Executes: Implementation based on task spec
# Result: Code changes per task definition
```

**Codex Implementation** (modifies code):
```bash
/cli:execute --tool codex "optimize database queries"
# Executes: Codex with full file access
# Result: MODIFIED query code, new indexes, updated tests
```

**Qwen Code Generation** (modifies code):
```bash
/cli:execute --tool qwen --enhance "refactor auth module"
# Step 1: Enhanced refactoring plan
# Step 2: Execute with MODE=write
# Result: REFACTORED auth code with structural changes
```

## Comparison with Analysis Commands

| Command | Intent | Code Changes | Auto-Approve |
|---------|--------|--------------|--------------|
| `/cli:analyze` | Understand code | NO | N/A |
| `/cli:chat` | Ask questions | NO | N/A |
| `/cli:execute` | **Implement** | **YES** | **YES** |

---
name: bug-diagnosis
description: Read-only bug root cause analysis using Gemini/Qwen/Codex with systematic diagnosis template for fix suggestions
argument-hint: "[--tool codex|gemini|qwen] [--enhance] [--cd path] bug description"
allowed-tools: SlashCommand(*), Bash(*), Task(*)
---

# CLI Mode: Bug Diagnosis (/cli:mode:bug-diagnosis)

## Purpose

Systematic bug diagnosis with root cause analysis template (`~/.claude/workflows/cli-templates/prompts/analysis/01-diagnose-bug-root-cause.txt`).

**Tool Selection**:
- **gemini** (default) - Best for bug diagnosis
- **qwen** - Fallback when Gemini unavailable
- **codex** - Alternative for complex bug analysis

## Parameters

- `--tool <gemini|qwen|codex>` - Tool selection (default: gemini)
- `--enhance` - Enhance bug description with `/enhance-prompt`
- `--cd "path"` - Target directory for focused diagnosis
- `<bug-description>` (Required) - Bug description or error details

## Tool Usage

**Gemini** (Primary):
```bash
# Uses gemini by default, or specify explicitly
--tool gemini
```

**Qwen** (Fallback):
```bash
--tool qwen
```

**Codex** (Alternative):
```bash
--tool codex
```

## Execution Flow

Uses **cli-execution-agent** (default) for automated bug diagnosis:

```javascript
Task(
  subagent_type="cli-execution-agent",
  description="Bug root cause diagnosis with fix suggestions",
  prompt=`
    Task: ${bug_description}
    Mode: bug-diagnosis
    Tool: ${tool_flag || 'gemini'}
    Directory: ${cd_path || '.'}
    Enhance: ${enhance_flag}
    Template: ~/.claude/workflows/cli-templates/prompts/analysis/01-diagnose-bug-root-cause.txt

    Execute systematic bug diagnosis and root cause analysis:

    1. Context Discovery:
       - Locate error traces, stack traces, and log messages
       - Find related code sections and affected modules
       - Identify data flow paths leading to the bug
       - Discover test cases related to bug area
       - Use MCP/ripgrep for comprehensive context gathering

    2. Root Cause Analysis:
       - Apply diagnostic template methodology
       - Trace execution to identify failure point
       - Analyze state, data, and logic causing issue
       - Document potential root causes with evidence
       - Assess bug severity and impact scope

    3. CLI Command Construction:
       - Tool: ${tool_flag || 'gemini'} (qwen fallback, codex for complex bugs)
       - Directory: cd ${cd_path || '.'} &&
       - Context: @**/* + error traces + affected code
       - Mode: analysis (read-only)
       - Template: analysis/01-diagnose-bug-root-cause.txt

    4. Output Generation:
       - Root cause diagnosis with evidence
       - Fix suggestions and recommendations
       - Prevention strategies
       - Save to .workflow/WFS-[id]/.chat/bug-diagnosis-[timestamp].md (or .scratchpad/)
  `
)
```

## Core Rules

- **Read-only**: Diagnoses bugs, does NOT modify code
- **Template**: `~/.claude/workflows/cli-templates/prompts/analysis/01-diagnose-bug-root-cause.txt`
- **Output**: `.workflow/WFS-[id]/.chat/bug-diagnosis-[timestamp].md` (or `.scratchpad/` if no session)

---
name: code-analysis
description: Read-only execution path tracing using Gemini/Qwen/Codex with specialized analysis template for call flow and optimization
argument-hint: "[--tool codex|gemini|qwen] [--enhance] [--cd path] analysis target"
allowed-tools: SlashCommand(*), Bash(*), Task(*)
---

# CLI Mode: Code Analysis (/cli:mode:code-analysis)

## Purpose

Systematic code analysis with execution path tracing template (`~/.claude/workflows/cli-templates/prompts/analysis/01-trace-code-execution.txt`).

**Tool Selection**:
- **gemini** (default) - Best for code analysis and tracing
- **qwen** - Fallback when Gemini unavailable
- **codex** - Alternative for complex analysis tasks

**Key Feature**: `--cd` flag for directory-scoped analysis

## Parameters

- `--tool <gemini|qwen|codex>` - Tool selection (default: gemini)
- `--enhance` - Enhance analysis target with `/enhance-prompt` first
- `--cd "path"` - Target directory for focused analysis
- `<analysis-target>` (Required) - Code analysis target or question

## Tool Usage

**Gemini** (Primary):
```bash
/cli:mode:code-analysis --tool gemini "trace auth flow"
# OR (default)
/cli:mode:code-analysis "trace auth flow"
```

**Qwen** (Fallback):
```bash
/cli:mode:code-analysis --tool qwen "trace auth flow"
```

**Codex** (Alternative):
```bash
/cli:mode:code-analysis --tool codex "trace auth flow"
```

## Execution Flow

Uses **cli-execution-agent** (default) for automated code analysis:

```javascript
Task(
  subagent_type="cli-execution-agent",
  description="Execution path tracing and call flow analysis",
  prompt=`
    Task: ${analysis_target}
    Mode: code-analysis
    Tool: ${tool_flag || 'gemini'}
    Directory: ${cd_path || '.'}
    Enhance: ${enhance_flag}
    Template: ~/.claude/workflows/cli-templates/prompts/analysis/01-trace-code-execution.txt

    Execute systematic code analysis with execution path tracing:

    1. Context Discovery:
       - Identify entry points and function signatures
       - Trace call chains and execution flows
       - Discover related files (implementations, dependencies, tests)
       - Map data flow and state transformations
       - Use MCP/ripgrep for comprehensive file discovery

    2. Analysis Execution:
       - Apply execution tracing template
       - Generate call flow diagrams (textual)
       - Document execution paths and branching logic
       - Identify optimization opportunities

    3. CLI Command Construction:
       - Tool: ${tool_flag || 'gemini'} (qwen fallback, codex for complex analysis)
       - Directory: cd ${cd_path || '.'} &&
       - Context: @**/* + discovered execution context
       - Mode: analysis (read-only)
       - Template: analysis/01-trace-code-execution.txt

    4. Output Generation:
       - Execution trace documentation
       - Call flow analysis with diagrams
       - Performance and optimization insights
       - Save to .workflow/WFS-[id]/.chat/code-analysis-[timestamp].md (or .scratchpad/)
  `
)
```

## Core Rules

- **Read-only**: Analyzes code, does NOT modify files
- **Template**: `~/.claude/workflows/cli-templates/prompts/analysis/01-trace-code-execution.txt`
- **Output**: `.workflow/WFS-[id]/.chat/code-analysis-[timestamp].md` (or `.scratchpad/` if no session)

---
name: plan
description: Read-only architecture planning using Gemini/Qwen/Codex with strategic planning template for modification plans and impact analysis
argument-hint: "[--tool codex|gemini|qwen] [--enhance] [--cd path] topic"
allowed-tools: SlashCommand(*), Bash(*), Task(*)
---

# CLI Mode: Plan (/cli:mode:plan)

## Purpose

Strategic software architecture planning template (`~/.claude/workflows/cli-templates/prompts/planning/01-plan-architecture-design.txt`).

**Tool Selection**:
- **gemini** (default) - Best for architecture planning
- **qwen** - Fallback when Gemini unavailable
- **codex** - Alternative for implementation planning

## Parameters

- `--tool <gemini|qwen|codex>` - Tool selection (default: gemini)
- `--enhance` - Enhance task with `/enhance-prompt`
- `--cd "path"` - Target directory for focused planning
- `<planning-task>` (Required) - Architecture planning task or modification requirements

## Tool Usage

**Gemini** (Primary):
```bash
--tool gemini  # or omit (default)
```

**Qwen** (Fallback):
```bash
--tool qwen
```

**Codex** (Alternative):
```bash
--tool codex
```

## Execution Flow

Uses **cli-execution-agent** (default) for automated planning:

```javascript
Task(
  subagent_type="cli-execution-agent",
  description="Architecture planning with impact analysis",
  prompt=`
    Task: ${planning_task}
    Mode: plan
    Tool: ${tool_flag || 'gemini'}
    Directory: ${cd_path || '.'}
    Enhance: ${enhance_flag}
    Template: ~/.claude/workflows/cli-templates/prompts/planning/01-plan-architecture-design.txt

    Execute strategic architecture planning:

    1. Context Discovery:
       - Analyze current architecture structure
       - Identify affected components and modules
       - Map dependencies and integration points
       - Assess modification impacts (scope, complexity, risks)

    2. Planning Analysis:
       - Apply strategic planning template
       - Generate modification plan with phases
       - Document architectural decisions and rationale
       - Identify potential conflicts and mitigation strategies

    3. CLI Command Construction:
       - Tool: ${tool_flag || 'gemini'} (qwen fallback, codex for implementation guidance)
       - Directory: cd ${cd_path || '.'} &&
       - Context: @**/* (full architecture context)
       - Mode: analysis (read-only, no code generation)
       - Template: planning/01-plan-architecture-design.txt

    4. Output Generation:
       - Strategic modification plan
       - Impact analysis and risk assessment
       - Implementation roadmap
       - Save to .workflow/WFS-[id]/.chat/plan-[timestamp].md (or .scratchpad/)
  `
)
```

## Core Rules

- **Read-only**: Creates modification plans, does NOT generate code
- **Template**: `~/.claude/workflows/cli-templates/prompts/planning/01-plan-architecture-design.txt`
- **Output**: `.workflow/WFS-[id]/.chat/plan-[timestamp].md` (or `.scratchpad/` if no session)

---
name: enhance-prompt
description: Enhanced prompt transformation using session memory and codebase analysis with --enhance flag detection
argument-hint: "user input to enhance"
---

## Overview

Systematically enhances user prompts by combining session memory context with codebase patterns, translating ambiguous requests into actionable specifications.

## Core Protocol

**Enhancement Pipeline:**
`Intent Translation` → `Context Integration` → `Gemini Analysis (if needed)` → `Structured Output`

**Context Sources:**
- Session memory (conversation history, previous analysis)
- Codebase patterns (via Gemini when triggered)
- Implicit technical requirements

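A sketch of the pipeline in code form; every helper here is hypothetical, and `shouldUseGemini` corresponds to the trigger logic in the next section:

```javascript
// Hypothetical sketch of the four pipeline stages; helper names are illustrative.
async function enhancePrompt(userInput, session) {
  const intent = translateIntent(userInput);          // "fix" → debug and resolve, etc.
  const context = gatherContext(session, intent);     // session memory first
  const analysis = shouldUseGemini(userInput)         // trigger logic below
    ? await runGeminiAnalysis(intent, context)
    : null;
  return formatStructuredOutput(intent, context, analysis); // INTENT/CONTEXT/ACTION/ATTENTION
}
```
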
## Gemini Trigger Logic

```javascript
// Runnable form of the trigger rule; promptAffectsMultipleModules is an assumed helper.
function shouldUseGemini(userPrompt) {
  const criticalKeywords = ['refactor', 'migrate', 'redesign', 'auth', 'payment', 'security'];

  return (
    promptAffectsMultipleModules(userPrompt, 3) ||                    // impact threshold: >3 modules
    criticalKeywords.some(k => userPrompt.toLowerCase().includes(k))  // critical-system keywords
  );
}
```

**Gemini Integration:** ~/.claude/workflows/intelligent-tools-strategy.md

## Enhancement Rules

### Intent Translation

| User Says | Translate To | Focus |
|-----------|--------------|-------|
| "fix" | Debug and resolve | Root cause → preserve behavior |
| "improve" | Enhance/optimize | Performance/readability |
| "add" | Implement feature | Integration + edge cases |
| "refactor" | Restructure quality | Maintain behavior |
| "update" | Modernize | Version compatibility |

### Context Integration Strategy

**Session Memory First:**
- Reference recent conversation context
- Reuse previously identified patterns
- Build on established understanding

**Codebase Analysis (via Gemini):**
- Only when complexity requires it
- Focus on integration points
- Identify existing patterns

**Example:**
```bash
# User: "add login"
# Session Memory: Previous auth discussion, JWT mentioned
# Inferred: JWT-based auth, integrate with existing session management
# Gemini (if multi-module): Analyze AuthService patterns, middleware structure
```

## Output Structure

```bash
INTENT: [Clear technical goal]
CONTEXT: [Session memory + codebase patterns]
ACTION: [Specific implementation steps]
ATTENTION: [Critical constraints]
```

### Output Examples

**Simple (no Gemini):**
```bash
# Input: "fix login button"
INTENT: Debug non-functional login button
CONTEXT: From session - OAuth flow discussed, known state issue
ACTION: Check event binding → verify state updates → test auth flow
ATTENTION: Preserve existing OAuth integration
```

**Complex (with Gemini):**
```bash
# Input: "refactor payment code"
INTENT: Restructure payment module for maintainability
CONTEXT: Session memory - PCI compliance requirements
         Gemini - PaymentService → StripeAdapter pattern identified
ACTION: Extract reusable validators → isolate payment gateway logic
ATTENTION: Zero behavior change, maintain PCI compliance, full test coverage
```

## Automatic Triggers

- Ambiguous language: "fix", "improve", "clean up"
- Multi-module impact (>3 modules)
- Architecture changes
- Critical systems: auth, payment, security
- Complex refactoring

## Key Principles

1. **Memory First**: Leverage session context before analysis
2. **Minimal Gemini**: Only when complexity demands it
3. **Context Reuse**: Build on previous understanding
4. **Clear Output**: Structured, actionable specifications
5. **Avoid Duplication**: Reference existing context, don't repeat

---
name: convert-to-plan
description: Convert planning artifacts (lite-plan, workflow session, markdown) to issue solutions
argument-hint: "[-y|--yes] [--issue <id>] [--supplement] <SOURCE>"
allowed-tools: TodoWrite(*), Bash(*), Read(*), Write(*), Glob(*), AskUserQuestion(*)
---

## Auto Mode

When `--yes` or `-y`: Skip confirmation, auto-create issue and bind solution.

# Issue Convert-to-Plan Command (/issue:convert-to-plan)

## Overview

Converts various planning artifact formats into issue workflow solutions with intelligent detection and automatic binding.

**Supported Sources** (auto-detected):
- **lite-plan**: `.workflow/.lite-plan/{slug}/plan.json`
- **workflow-session**: `WFS-xxx` ID or `.workflow/active/{session}/` folder
- **markdown**: Any `.md` file with implementation/task content
- **json**: Direct JSON files matching plan-json-schema

## Quick Reference

```bash
# Convert lite-plan to new issue (auto-creates issue)
/issue:convert-to-plan ".workflow/.lite-plan/implement-auth-2026-01-25"

# Convert workflow session to existing issue
/issue:convert-to-plan WFS-auth-impl --issue GH-123

# Supplement existing solution with additional tasks
/issue:convert-to-plan "./docs/additional-tasks.md" --issue ISS-001 --supplement

# Auto mode - skip confirmations
/issue:convert-to-plan ".workflow/.lite-plan/my-plan" -y
```

## Command Options

| Option | Description | Default |
|--------|-------------|---------|
| `<SOURCE>` | Planning artifact path or WFS-xxx ID | Required |
| `--issue <id>` | Bind to existing issue instead of creating new | Auto-create |
| `--supplement` | Add tasks to existing solution (requires --issue) | false |
| `-y, --yes` | Skip all confirmations | false |

## Core Data Access Principle

**⚠️ Important**: Use CLI commands for all issue/solution operations.

| Operation | Correct | Incorrect |
|-----------|---------|-----------|
| Get issue | `ccw issue status <id> --json` | Read issues.jsonl directly |
| Create issue | `ccw issue init <id> --title "..."` | Write to issues.jsonl |
| Bind solution | `ccw issue bind <id> <sol-id>` | Edit issues.jsonl |
| List solutions | `ccw issue solutions --issue <id> --brief` | Read solutions/*.jsonl |

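For example, reading issue state goes through the CLI rather than parsing the JSONL store directly (a minimal sketch; the issue ID is illustrative):

```javascript
// Correct: query issue state via the CLI, never by reading issues.jsonl directly.
const issue = JSON.parse(Bash(`ccw issue status ISS-001 --json`));
```
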
## Solution Schema Reference

Target format for all extracted data (from solution-schema.json):

```typescript
interface Solution {
  id: string;                    // SOL-{issue-id}-{4-char-uid}
  description?: string;          // High-level summary
  approach?: string;             // Technical strategy
  tasks: Task[];                 // Required: at least 1 task
  exploration_context?: object;  // Optional: source context
  analysis?: { risk, impact, complexity };
  score?: number;                // 0.0-1.0
  is_bound: boolean;
  created_at: string;
  bound_at?: string;
}

interface Task {
  id: string;                    // T1, T2, T3... (pattern: ^T[0-9]+$)
  title: string;                 // Required: action verb + target
  scope: string;                 // Required: module path or feature area
  action: Action;                // Required: Create|Update|Implement|...
  description?: string;
  modification_points?: Array<{file, target, change}>;
  implementation: string[];      // Required: step-by-step guide
  test?: { unit?, integration?, commands?, coverage_target? };
  acceptance: { criteria: string[], verification: string[] }; // Required
  commit?: { type, scope, message_template, breaking? };
  depends_on?: string[];
  priority?: number;             // 1-5 (default: 3)
}

type Action = 'Create' | 'Update' | 'Implement' | 'Refactor' | 'Add' | 'Delete' | 'Configure' | 'Test' | 'Fix';
```

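For reference, a minimal task satisfying the required fields might look like this (all values are illustrative):

```javascript
// Illustrative Task instance conforming to the schema above.
const exampleTask = {
  id: 'T1',
  title: 'Implement JWT middleware',
  scope: 'src/auth',
  action: 'Implement',
  implementation: [
    'Add a token-verification middleware in src/auth',
    'Wire the middleware into protected routes',
  ],
  acceptance: {
    criteria: ['Requests without a valid JWT return 401'],
    verification: ['Run the auth integration tests'],
  },
  depends_on: [],
  priority: 3,
};
```
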
## Implementation

### Phase 1: Parse Arguments & Detect Source Type

```javascript
const input = userInput.trim();
const flags = parseFlags(userInput); // --issue, --supplement, -y/--yes

// Extract source path (first non-flag argument)
const source = extractSourceArg(input);

// Detect source type
function detectSourceType(source) {
  // Check for WFS-xxx pattern (workflow session ID)
  if (source.match(/^WFS-[\w-]+$/)) {
    return { type: 'workflow-session-id', path: `.workflow/active/${source}` };
  }

  // Check if directory
  const isDir = Bash(`test -d "${source}" && echo "dir" || echo "file"`).trim() === 'dir';

  if (isDir) {
    // Check for lite-plan indicator
    const hasPlanJson = Bash(`test -f "${source}/plan.json" && echo "yes" || echo "no"`).trim() === 'yes';
    if (hasPlanJson) {
      return { type: 'lite-plan', path: source };
    }

    // Check for workflow session indicator
    const hasSession = Bash(`test -f "${source}/workflow-session.json" && echo "yes" || echo "no"`).trim() === 'yes';
    if (hasSession) {
      return { type: 'workflow-session', path: source };
    }
  }

  // Check file extensions
  if (source.endsWith('.json')) {
    return { type: 'json-file', path: source };
  }
  if (source.endsWith('.md')) {
    return { type: 'markdown-file', path: source };
  }

  // Check if path exists at all
  const exists = Bash(`test -e "${source}" && echo "yes" || echo "no"`).trim() === 'yes';
  if (!exists) {
    throw new Error(`E001: Source not found: ${source}`);
  }

  return { type: 'unknown', path: source };
}

const sourceInfo = detectSourceType(source);
if (sourceInfo.type === 'unknown') {
  throw new Error(`E002: Unable to detect source format for: ${source}`);
}

console.log(`Detected source type: ${sourceInfo.type}`);
```

### Phase 2: Extract Data Using Format-Specific Extractor

```javascript
let extracted = { title: '', approach: '', tasks: [], metadata: {} };

switch (sourceInfo.type) {
  case 'lite-plan':
    extracted = extractFromLitePlan(sourceInfo.path);
    break;
  case 'workflow-session':
  case 'workflow-session-id':
    extracted = extractFromWorkflowSession(sourceInfo.path);
    break;
  case 'markdown-file':
    extracted = await extractFromMarkdownAI(sourceInfo.path);
    break;
  case 'json-file':
    extracted = extractFromJsonFile(sourceInfo.path);
    break;
}

// Validate extraction
if (!extracted.tasks || extracted.tasks.length === 0) {
  throw new Error('E006: No tasks extracted from source');
}

// Ensure task IDs are normalized to T1, T2, T3...
extracted.tasks = normalizeTaskIds(extracted.tasks);

console.log(`Extracted: ${extracted.tasks.length} tasks`);
```

#### Extractor: Lite-Plan

```javascript
function extractFromLitePlan(folderPath) {
  const planJson = Read(`${folderPath}/plan.json`);
  const plan = JSON.parse(planJson);

  return {
    title: plan.summary?.split('.')[0]?.trim() || 'Untitled Plan',
    description: plan.summary,
    approach: plan.approach,
    tasks: plan.tasks.map(t => ({
      id: t.id,
      title: t.title,
      scope: t.scope || '',
      action: t.action || 'Implement',
      description: t.description || t.title,
      modification_points: t.modification_points || [],
      implementation: Array.isArray(t.implementation) ? t.implementation : [t.implementation || ''],
      test: t.verification ? {
        unit: t.verification.unit_tests,
        integration: t.verification.integration_tests,
        commands: t.verification.manual_checks
      } : {},
      acceptance: {
        criteria: Array.isArray(t.acceptance) ? t.acceptance : [t.acceptance || ''],
        verification: t.verification?.manual_checks || []
      },
      depends_on: t.depends_on || [],
      priority: 3
    })),
    metadata: {
      source_type: 'lite-plan',
      source_path: folderPath,
      complexity: plan.complexity,
      estimated_time: plan.estimated_time,
      exploration_angles: plan._metadata?.exploration_angles || [],
      original_timestamp: plan._metadata?.timestamp
    }
  };
}
```

#### Extractor: Workflow Session

```javascript
function extractFromWorkflowSession(sessionPath) {
  // Load session metadata
  const sessionJson = Read(`${sessionPath}/workflow-session.json`);
  const session = JSON.parse(sessionJson);

  // Load IMPL_PLAN.md for approach (if exists)
  let approach = '';
  const implPlanPath = `${sessionPath}/IMPL_PLAN.md`;
  const hasImplPlan = Bash(`test -f "${implPlanPath}" && echo "yes" || echo "no"`).trim() === 'yes';
  if (hasImplPlan) {
    const implPlan = Read(implPlanPath);
    // Extract overview/approach section
    const overviewMatch = implPlan.match(/##\s*(?:Overview|Approach|Strategy)\s*\n([\s\S]*?)(?=\n##|$)/i);
    approach = overviewMatch?.[1]?.trim() || implPlan.split('\n').slice(0, 10).join('\n');
  }

  // Load all task JSONs from .task folder
  const taskFiles = Glob({ pattern: `${sessionPath}/.task/IMPL-*.json` });
  const tasks = taskFiles.map(f => {
    const taskJson = Read(f);
    const task = JSON.parse(taskJson);
    return {
      id: task.id?.replace(/^IMPL-0*/, 'T') || 'T1', // IMPL-001 → T1
      title: task.title,
      scope: task.scope || inferScopeFromTask(task),
      action: capitalizeAction(task.type) || 'Implement',
      description: task.description,
      modification_points: task.implementation?.modification_points || [],
      implementation: task.implementation?.steps || [],
      test: task.implementation?.test || {},
      acceptance: {
        criteria: task.acceptance_criteria || [],
        verification: task.verification_steps || []
      },
      commit: task.commit,
      depends_on: (task.depends_on || []).map(d => d.replace(/^IMPL-0*/, 'T')),
      priority: task.priority || 3
    };
  });

  return {
    title: session.name || session.description?.split('.')[0] || 'Workflow Session',
    description: session.description || session.name,
    approach: approach || session.description,
    tasks: tasks,
    metadata: {
      source_type: 'workflow-session',
      source_path: sessionPath,
      session_id: session.id,
      created_at: session.created_at
    }
  };
}

function inferScopeFromTask(task) {
  if (task.implementation?.modification_points?.length) {
    const files = task.implementation.modification_points.map(m => m.file);
    // Find common directory prefix
    const dirs = files.map(f => f.split('/').slice(0, -1).join('/'));
    return [...new Set(dirs)][0] || '';
  }
  return '';
}

function capitalizeAction(type) {
  if (!type) return 'Implement';
  const map = { feature: 'Implement', bugfix: 'Fix', refactor: 'Refactor', test: 'Test', docs: 'Update' };
  return map[type.toLowerCase()] || type.charAt(0).toUpperCase() + type.slice(1);
}
```

#### Extractor: Markdown (AI-Assisted via Gemini)

```javascript
async function extractFromMarkdownAI(filePath) {
  const fileContent = Read(filePath);

  // Use Gemini CLI for intelligent extraction
  const cliPrompt = `PURPOSE: Extract implementation plan from markdown document for issue solution conversion. Must output ONLY valid JSON.
TASK: • Analyze document structure • Identify title/summary • Extract approach/strategy section • Parse tasks from any format (lists, tables, sections, code blocks) • Normalize each task to solution schema
MODE: analysis
CONTEXT: Document content provided below
EXPECTED: Valid JSON object with format:
{
  "title": "extracted title",
  "approach": "extracted approach/strategy",
  "tasks": [
    {
      "id": "T1",
      "title": "task title",
      "scope": "module or feature area",
      "action": "Implement|Update|Create|Fix|Refactor|Add|Delete|Configure|Test",
      "description": "what to do",
      "implementation": ["step 1", "step 2"],
      "acceptance": ["criteria 1", "criteria 2"]
    }
  ]
}
CONSTRAINTS: Output ONLY valid JSON - no markdown, no explanation | Action must be one of: Create, Update, Implement, Refactor, Add, Delete, Configure, Test, Fix | Tasks must have id, title, scope, action, implementation (array), acceptance (array)

DOCUMENT CONTENT:
${fileContent}`;

  // Execute Gemini CLI
  const result = Bash(`ccw cli -p '${cliPrompt.replace(/'/g, "'\\''")}' --tool gemini --mode analysis`, { timeout: 120000 });

  // Parse JSON from result (may be wrapped in markdown code block)
  let jsonText = result.trim();
  const jsonMatch = jsonText.match(/```(?:json)?\s*([\s\S]*?)```/);
  if (jsonMatch) {
    jsonText = jsonMatch[1].trim();
  }

  try {
    const extracted = JSON.parse(jsonText);

    // Normalize tasks
    const tasks = (extracted.tasks || []).map((t, i) => ({
      id: t.id || `T${i + 1}`,
      title: t.title || 'Untitled task',
      scope: t.scope || '',
      action: validateAction(t.action) || 'Implement',
      description: t.description || t.title,
      modification_points: t.modification_points || [],
      implementation: Array.isArray(t.implementation) ? t.implementation : [t.implementation || ''],
      test: t.test || {},
      acceptance: {
        criteria: Array.isArray(t.acceptance) ? t.acceptance : [t.acceptance || ''],
        verification: t.verification || []
      },
      depends_on: t.depends_on || [],
      priority: t.priority || 3
    }));

    return {
      title: extracted.title || 'Extracted Plan',
      description: extracted.summary || extracted.title,
      approach: extracted.approach || '',
      tasks: tasks,
      metadata: {
        source_type: 'markdown',
        source_path: filePath,
        extraction_method: 'gemini-ai'
      }
    };
  } catch (e) {
    // Provide more context for debugging
    throw new Error(`E005: Failed to extract tasks from markdown. Gemini response was not valid JSON. Error: ${e.message}. Response preview: ${jsonText.substring(0, 200)}...`);
  }
}

function validateAction(action) {
  const validActions = ['Create', 'Update', 'Implement', 'Refactor', 'Add', 'Delete', 'Configure', 'Test', 'Fix'];
  if (!action) return null;
  const normalized = action.charAt(0).toUpperCase() + action.slice(1).toLowerCase();
  return validActions.includes(normalized) ? normalized : null;
}
```

#### Extractor: JSON File

```javascript
function extractFromJsonFile(filePath) {
  const content = Read(filePath);
  const plan = JSON.parse(content);

  // Detect if it's already solution format or plan format
  if (plan.tasks && Array.isArray(plan.tasks)) {
    // Map tasks to normalized format
    const tasks = plan.tasks.map((t, i) => ({
      id: t.id || `T${i + 1}`,
      title: t.title,
      scope: t.scope || '',
      action: t.action || 'Implement',
      description: t.description || t.title,
      modification_points: t.modification_points || [],
      implementation: Array.isArray(t.implementation) ? t.implementation : [t.implementation || ''],
      test: t.test || t.verification || {},
      acceptance: normalizeAcceptance(t.acceptance),
      depends_on: t.depends_on || [],
      priority: t.priority || 3
    }));

    return {
      title: plan.summary?.split('.')[0] || plan.title || 'JSON Plan',
      description: plan.summary || plan.description,
      approach: plan.approach,
      tasks: tasks,
      metadata: {
        source_type: 'json',
        source_path: filePath,
        complexity: plan.complexity,
        original_metadata: plan._metadata
      }
    };
  }

  throw new Error('E002: JSON file does not contain valid plan structure (missing tasks array)');
}

function normalizeAcceptance(acceptance) {
  if (!acceptance) return { criteria: [], verification: [] };
  if (typeof acceptance === 'object' && acceptance.criteria) return acceptance;
  if (Array.isArray(acceptance)) return { criteria: acceptance, verification: [] };
  return { criteria: [String(acceptance)], verification: [] };
}
```

### Phase 3: Normalize Task IDs

```javascript
function normalizeTaskIds(tasks) {
  return tasks.map((t, i) => ({
    ...t,
    id: `T${i + 1}`,
    // Also normalize depends_on references
    depends_on: (t.depends_on || []).map(d => {
      // Handle various ID formats: IMPL-001, T1, 1, etc.
      const num = d.match(/\d+/)?.[0];
      return num ? `T${parseInt(num)}` : d;
    })
  }));
}
```

### Phase 4: Resolve Issue (Create or Find)

```javascript
let issueId = flags.issue;
let existingSolution = null;

if (issueId) {
  // Validate issue exists
  let issueCheck;
  try {
    issueCheck = Bash(`ccw issue status ${issueId} --json 2>/dev/null`).trim();
    if (!issueCheck || issueCheck === '') {
      throw new Error('empty response');
    }
  } catch (e) {
    throw new Error(`E003: Issue not found: ${issueId}`);
  }

  const issue = JSON.parse(issueCheck);

  // Check if issue already has bound solution
  if (issue.bound_solution_id && !flags.supplement) {
    throw new Error(`E004: Issue ${issueId} already has bound solution (${issue.bound_solution_id}). Use --supplement to add tasks.`);
  }

  // Load existing solution for supplement mode
  if (flags.supplement && issue.bound_solution_id) {
    try {
      const solResult = Bash(`ccw issue solution ${issue.bound_solution_id} --json`).trim();
      existingSolution = JSON.parse(solResult);
      console.log(`Loaded existing solution with ${existingSolution.tasks.length} tasks`);
    } catch (e) {
      throw new Error(`Failed to load existing solution: ${e.message}`);
    }
  }
} else {
  // Create new issue via ccw issue create (auto-generates correct ID)
  // Smart extraction: title from content, priority from complexity
  const title = extracted.title || 'Converted Plan';
  const context = extracted.description || extracted.approach || title;

  // Auto-determine priority based on complexity
  const complexityMap = { high: 2, medium: 3, low: 4 };
  const priority = complexityMap[extracted.metadata.complexity?.toLowerCase()] || 3;

  try {
    // Use heredoc to avoid shell escaping issues
    const createResult = Bash(`ccw issue create << 'EOF'
{
  "title": ${JSON.stringify(title)},
  "context": ${JSON.stringify(context)},
  "priority": ${priority},
  "source": "converted"
}
EOF`).trim();

    // Parse result to get created issue ID
    const created = JSON.parse(createResult);
    issueId = created.id;
    console.log(`Created issue: ${issueId} (priority: ${priority})`);
  } catch (e) {
    throw new Error(`Failed to create issue: ${e.message}`);
  }
}
```

### Phase 5: Generate Solution

```javascript
// Generate solution ID
function generateSolutionId(issueId) {
  const chars = 'abcdefghijklmnopqrstuvwxyz0123456789';
  let uid = '';
  for (let i = 0; i < 4; i++) {
    uid += chars[Math.floor(Math.random() * chars.length)];
  }
  return `SOL-${issueId}-${uid}`;
}

let solution;
const solutionId = generateSolutionId(issueId);

if (flags.supplement && existingSolution) {
  // Supplement mode: merge with existing solution
  const maxTaskId = Math.max(...existingSolution.tasks.map(t => parseInt(t.id.slice(1))));

  const newTasks = extracted.tasks.map((t, i) => ({
    ...t,
    id: `T${maxTaskId + i + 1}`
  }));

  solution = {
    ...existingSolution,
    tasks: [...existingSolution.tasks, ...newTasks],
    approach: existingSolution.approach + '\n\n[Supplementary] ' + (extracted.approach || ''),
    updated_at: new Date().toISOString()
  };

  console.log(`Supplementing: ${existingSolution.tasks.length} existing + ${newTasks.length} new = ${solution.tasks.length} total tasks`);
} else {
  // New solution
  solution = {
    id: solutionId,
    description: extracted.description || extracted.title,
    approach: extracted.approach,
    tasks: extracted.tasks,
    exploration_context: extracted.metadata.exploration_angles ? {
      exploration_angles: extracted.metadata.exploration_angles
    } : undefined,
    analysis: {
      risk: 'medium',
      impact: 'medium',
      complexity: extracted.metadata.complexity?.toLowerCase() || 'medium'
    },
    is_bound: false,
    created_at: new Date().toISOString(),
    _conversion_metadata: {
      source_type: extracted.metadata.source_type,
      source_path: extracted.metadata.source_path,
      converted_at: new Date().toISOString()
    }
  };
}
```

### Phase 6: Confirm & Persist

```javascript
// Display preview
console.log(`
## Conversion Summary

**Issue**: ${issueId}
**Solution**: ${flags.supplement ? existingSolution.id : solutionId}
**Tasks**: ${solution.tasks.length}
**Mode**: ${flags.supplement ? 'Supplement' : 'New'}

### Tasks:
${solution.tasks.map(t => `- ${t.id}: ${t.title} [${t.action}]`).join('\n')}
`);

// Confirm if not auto mode
if (!flags.yes && !flags.y) {
  const confirm = AskUserQuestion({
    questions: [{
      question: `Create solution for issue ${issueId} with ${solution.tasks.length} tasks?`,
      header: 'Confirm',
      multiSelect: false,
      options: [
        { label: 'Yes, create solution', description: 'Create and bind solution' },
        { label: 'Cancel', description: 'Abort without changes' }
      ]
    }]
  });

  if (!confirm.answers?.['Confirm']?.includes('Yes')) {
    console.log('Cancelled.');
    return;
  }
}

// Persist solution (following issue-plan-agent pattern)
Bash(`mkdir -p .workflow/issues/solutions`);

const solutionFile = `.workflow/issues/solutions/${issueId}.jsonl`;

if (flags.supplement) {
  // Supplement mode: update existing solution line atomically
  try {
    const existingContent = Read(solutionFile);
    const lines = existingContent.trim().split('\n').filter(l => l);
    const updatedLines = lines.map(line => {
      const sol = JSON.parse(line);
      if (sol.id === existingSolution.id) {
        return JSON.stringify(solution);
      }
      return line;
    });
    // Atomic write: write entire content at once
    Write({ file_path: solutionFile, content: updatedLines.join('\n') + '\n' });
    console.log(`✓ Updated solution: ${existingSolution.id}`);
  } catch (e) {
    throw new Error(`Failed to update solution: ${e.message}`);
  }

  // Note: No need to rebind - solution is already bound to issue
} else {
  // New solution: append to JSONL file (following issue-plan-agent pattern)
  try {
    const solutionLine = JSON.stringify(solution);

    // Read existing content, append new line, write atomically
    const existing = Bash(`test -f "${solutionFile}" && cat "${solutionFile}" || echo ""`).trim();
    const newContent = existing ? existing + '\n' + solutionLine + '\n' : solutionLine + '\n';
    Write({ file_path: solutionFile, content: newContent });

    console.log(`✓ Created solution: ${solutionId}`);
  } catch (e) {
    throw new Error(`Failed to write solution: ${e.message}`);
  }

  // Bind solution to issue
  try {
    Bash(`ccw issue bind ${issueId} ${solutionId}`);
    console.log(`✓ Bound solution to issue`);
  } catch (e) {
    // Cleanup: remove solution file on bind failure
    try {
      Bash(`rm -f "${solutionFile}"`);
    } catch (cleanupError) {
      // Ignore cleanup errors
    }
    throw new Error(`Failed to bind solution: ${e.message}`);
  }

  // Update issue status to planned
  try {
    Bash(`ccw issue update ${issueId} --status planned`);
  } catch (e) {
    throw new Error(`Failed to update issue status: ${e.message}`);
  }
}
```

### Phase 7: Summary

```javascript
console.log(`
## Done

**Issue**: ${issueId}
**Solution**: ${flags.supplement ? existingSolution.id : solutionId}
**Tasks**: ${solution.tasks.length}
**Status**: planned

### Next Steps:
- \`/issue:queue\` → Form execution queue
- \`ccw issue status ${issueId}\` → View issue details
- \`ccw issue solution ${flags.supplement ? existingSolution.id : solutionId}\` → View solution
`);
```

## Error Handling

| Error | Code | Resolution |
|-------|------|------------|
| Source not found | E001 | Check path exists |
| Invalid source format | E002 | Verify file contains valid plan structure |
| Issue not found | E003 | Check issue ID or omit --issue to create new |
| Solution already bound | E004 | Use --supplement to add tasks |
| AI extraction failed | E005 | Check markdown structure, try simpler format |
| No tasks extracted | E006 | Source must contain at least 1 task |

## Related Commands

- `/issue:plan` - Generate solutions from issue exploration
- `/issue:queue` - Form execution queue from bound solutions
- `/issue:execute` - Execute queue with DAG parallelism
- `ccw issue status <id>` - View issue details
- `ccw issue solution <id>` - View solution details

---
name: issue:discover-by-prompt
description: Discover issues from user prompt with Gemini-planned iterative multi-agent exploration. Uses ACE semantic search for context gathering and supports cross-module comparison (e.g., frontend vs backend API contracts).
argument-hint: "[-y|--yes] <prompt> [--scope=src/**] [--depth=standard|deep] [--max-iterations=5]"
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*), Task(*), AskUserQuestion(*), Glob(*), Grep(*), mcp__ace-tool__search_context(*), mcp__exa__search(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-continue all iterations, skip confirmations.

# Issue Discovery by Prompt

## Quick Start

```bash
# Discover issues based on user description
/issue:discover-by-prompt "Check if frontend API calls match backend implementations"

# Compare specific modules
/issue:discover-by-prompt "Verify auth flow consistency between mobile and web clients" --scope=src/auth/**,src/mobile/**

# Deep exploration with more iterations
/issue:discover-by-prompt "Find all places where error handling is inconsistent" --depth=deep --max-iterations=8

# Focused backend-frontend contract check
/issue:discover-by-prompt "Compare REST API definitions with frontend fetch calls"
```

**Core Difference from `/issue:discover`**:
- `discover`: Pre-defined perspectives (bug, security, etc.), parallel execution
- `discover-by-prompt`: User-driven prompt, Gemini-planned strategy, iterative exploration

## What & Why
|
||||
|
||||
### Core Concept
|
||||
|
||||
Prompt-driven issue discovery with intelligent planning. Instead of fixed perspectives, this command:
|
||||
|
||||
1. **Analyzes user intent** via Gemini to understand what to find
|
||||
2. **Plans exploration strategy** dynamically based on codebase structure
|
||||
3. **Executes iterative multi-agent exploration** with feedback loops
|
||||
4. **Performs cross-module comparison** when detecting comparison intent
|
||||
|
||||
### Value Proposition
|
||||
|
||||
1. **Natural Language Input**: Describe what you want to find, not how to find it
|
||||
2. **Intelligent Planning**: Gemini designs optimal exploration strategy
|
||||
3. **Iterative Refinement**: Each round builds on previous discoveries
|
||||
4. **Cross-Module Analysis**: Compare frontend/backend, mobile/web, old/new implementations
|
||||
5. **Adaptive Exploration**: Adjusts direction based on findings
|
||||
|
||||
### Use Cases
|
||||
|
||||
| Scenario | Example Prompt |
|
||||
|----------|----------------|
|
||||
| API Contract | "Check if frontend calls match backend endpoints" |
|
||||
| Error Handling | "Find inconsistent error handling patterns" |
|
||||
| Migration Gap | "Compare old auth with new auth implementation" |
|
||||
| Feature Parity | "Verify mobile has all web features" |
|
||||
| Schema Drift | "Check if TypeScript types match API responses" |
|
||||
| Integration | "Find mismatches between service A and service B" |
|
||||
|
||||
## How It Works
|
||||
|
||||
### Execution Flow
|
||||
|
||||
```
|
||||
Phase 1: Prompt Analysis & Initialization
|
||||
├─ Parse user prompt and flags
|
||||
├─ Detect exploration intent (comparison/search/verification)
|
||||
└─ Initialize discovery session
|
||||
|
||||
Phase 1.5: ACE Context Gathering
|
||||
├─ Use ACE semantic search to understand codebase structure
|
||||
├─ Identify relevant modules based on prompt keywords
|
||||
├─ Collect architecture context for Gemini planning
|
||||
└─ Build initial context package
|
||||
|
||||
Phase 2: Gemini Strategy Planning
|
||||
├─ Feed ACE context + prompt to Gemini CLI
|
||||
├─ Gemini analyzes and generates exploration strategy
|
||||
├─ Create exploration dimensions with search targets
|
||||
├─ Define comparison matrix (if comparison intent)
|
||||
└─ Set success criteria and iteration limits
|
||||
|
||||
Phase 3: Iterative Agent Exploration (with ACE)
|
||||
├─ Iteration 1: Initial exploration by assigned agents
|
||||
│ ├─ Agent A: ACE search + explore dimension 1
|
||||
│ ├─ Agent B: ACE search + explore dimension 2
|
||||
│ └─ Collect findings, update shared context
|
||||
├─ Iteration 2-N: Refined exploration
|
||||
│ ├─ Analyze previous findings
|
||||
│ ├─ ACE search for related code paths
|
||||
│ ├─ Execute targeted exploration
|
||||
│ └─ Update cumulative findings
|
||||
└─ Termination: Max iterations or convergence
|
||||
|
||||
Phase 4: Cross-Analysis & Synthesis
|
||||
├─ Compare findings across dimensions
|
||||
├─ Identify discrepancies and issues
|
||||
├─ Calculate confidence scores
|
||||
└─ Generate issue candidates
|
||||
|
||||
Phase 5: Issue Generation & Summary
|
||||
├─ Convert findings to issue format
|
||||
├─ Write discovery outputs
|
||||
└─ Prompt user for next action
|
||||
```
|
||||
|
||||
### Exploration Dimensions
|
||||
|
||||
Dimensions are **dynamically generated by Gemini** based on the user prompt. Not limited to predefined categories.
|
||||
|
||||
**Examples**:
|
||||
|
||||
| Prompt | Generated Dimensions |
|
||||
|--------|---------------------|
|
||||
| "Check API contracts" | frontend-calls, backend-handlers |
|
||||
| "Find auth issues" | auth-module (single dimension) |
|
||||
| "Compare old/new implementations" | legacy-code, new-code |
|
||||
| "Audit payment flow" | payment-service, validation, logging |
|
||||
| "Find error handling gaps" | error-handlers, error-types, recovery-logic |
|
||||
|
||||
Gemini analyzes the prompt + ACE context to determine:
|
||||
- How many dimensions are needed (1 to N)
|
||||
- What each dimension should focus on
|
||||
- Whether comparison is needed between dimensions
|
||||
|
||||
### Iteration Strategy
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Iteration Loop │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ 1. Plan: What to explore this iteration │
|
||||
│ └─ Based on: previous findings + unexplored areas │
|
||||
│ │
|
||||
│ 2. Execute: Launch agents for this iteration │
|
||||
│ └─ Each agent: explore → collect → return summary │
|
||||
│ │
|
||||
│ 3. Analyze: Process iteration results │
|
||||
│ └─ New findings? Gaps? Contradictions? │
|
||||
│ │
|
||||
│ 4. Decide: Continue or terminate │
|
||||
│ └─ Terminate if: max iterations OR convergence OR │
|
||||
│ high confidence on all questions │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Core Responsibilities
|
||||
|
||||
### Phase 1: Prompt Analysis & Initialization
|
||||
|
||||
```javascript
|
||||
// Step 1: Parse arguments
|
||||
const { prompt, scope, depth, maxIterations } = parseArgs(args);
|
||||
|
||||
// Step 2: Generate discovery ID
|
||||
const discoveryId = `DBP-${formatDate(new Date(), 'YYYYMMDD-HHmmss')}`;
|
||||
|
||||
// Step 3: Create output directory
|
||||
const outputDir = `.workflow/issues/discoveries/${discoveryId}`;
|
||||
await mkdir(outputDir, { recursive: true });
|
||||
await mkdir(`${outputDir}/iterations`, { recursive: true });
|
||||
|
||||
// Step 4: Detect intent type from prompt
|
||||
const intentType = detectIntent(prompt);
|
||||
// Returns: 'comparison' | 'search' | 'verification' | 'audit'
|
||||
|
||||
// Step 5: Initialize discovery state
|
||||
await writeJson(`${outputDir}/discovery-state.json`, {
|
||||
discovery_id: discoveryId,
|
||||
type: 'prompt-driven',
|
||||
prompt: prompt,
|
||||
intent_type: intentType,
|
||||
scope: scope || '**/*',
|
||||
depth: depth || 'standard',
|
||||
max_iterations: maxIterations || 5,
|
||||
phase: 'initialization',
|
||||
created_at: new Date().toISOString(),
|
||||
iterations: [],
|
||||
cumulative_findings: [],
|
||||
comparison_matrix: null // filled for comparison intent
|
||||
});
|
||||
```
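
`detectIntent` is assumed above rather than shown. A minimal keyword-heuristic sketch, where the function body and keyword lists are illustrative assumptions rather than the shipped implementation:

```javascript
// Minimal sketch of detectIntent - keyword heuristics are assumptions.
function detectIntent(prompt) {
  const p = prompt.toLowerCase();
  // Comparison checked first so "Check if X matches Y" classifies as comparison
  if (/\b(compare|match|versus|vs\.?|consistency between|mismatch)\b/.test(p)) return 'comparison';
  // Verification: the prompt asks to confirm expected behavior
  if (/\b(verify|check if|ensure|confirm)\b/.test(p)) return 'verification';
  // Audit: broad review of an area
  if (/\b(audit|review all|scan)\b/.test(p)) return 'audit';
  // Default: open-ended search
  return 'search';
}
```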

### Phase 1.5: ACE Context Gathering

**Purpose**: Use ACE semantic search to gather codebase context before Gemini planning.

```javascript
// Step 1: Extract keywords from prompt for semantic search
const keywords = extractKeywords(prompt);
// e.g., "frontend API calls match backend" → ["frontend", "API", "backend", "endpoints"]

// Step 2: Use ACE to understand codebase structure
const aceQueries = [
  `Project architecture and module structure for ${keywords.join(', ')}`,
  `Where are ${keywords[0]} implementations located?`,
  `How does ${keywords.slice(0, 2).join(' ')} work in this codebase?`
];

const aceResults = [];
for (const query of aceQueries) {
  const result = await mcp__ace-tool__search_context({
    project_root_path: process.cwd(),
    query: query
  });
  aceResults.push({ query, result });
}

// Step 3: Build context package for Gemini (kept in memory)
const aceContext = {
  prompt_keywords: keywords,
  codebase_structure: aceResults[0].result,
  relevant_modules: aceResults.slice(1).map(r => r.result),
  detected_patterns: extractPatterns(aceResults)
};

// Step 4: Update state (no separate file)
await updateDiscoveryState(outputDir, {
  phase: 'context-gathered',
  ace_context: {
    queries_executed: aceQueries.length,
    modules_identified: aceContext.relevant_modules.length
  }
});

// aceContext passed to Phase 2 in memory
```
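
`extractKeywords` is likewise left undefined. The example output above includes "endpoints", which does not appear in the prompt, so the real function presumably does some synonym expansion; the sketch below covers only the tokenization half, under that caveat:

```javascript
// Minimal sketch of extractKeywords - stopword list and tokenization are
// assumptions; synonym expansion (e.g., "calls" → "endpoints") is not shown.
const STOPWORDS = new Set(['the', 'a', 'an', 'if', 'in', 'of', 'to', 'and', 'or',
  'check', 'find', 'all', 'with', 'between', 'are', 'is', 'do', 'does', 'where']);

function extractKeywords(prompt) {
  return [...new Set(
    prompt
      .split(/[^a-zA-Z0-9]+/)                         // split on non-alphanumerics
      .filter(w => w.length > 2)                      // drop short tokens
      .filter(w => !STOPWORDS.has(w.toLowerCase()))   // drop stopwords
  )];
}

// extractKeywords("Check if frontend API calls match backend implementations")
// → ["frontend", "API", "calls", "match", "backend", "implementations"]
```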

**ACE Query Strategy by Intent Type**:

| Intent | ACE Queries |
|--------|-------------|
| **comparison** | "frontend API calls", "backend API handlers", "API contract definitions" |
| **search** | "{keyword} implementations", "{keyword} usage patterns" |
| **verification** | "expected behavior for {feature}", "test coverage for {feature}" |
| **audit** | "all {category} patterns", "{category} security concerns" |
### Phase 2: Gemini Strategy Planning

**Purpose**: Gemini analyzes the user prompt + ACE context to design an optimal exploration strategy.

```javascript
// Step 1: Use the ACE context gathered in Phase 1.5
// (aceContext is held in memory; per the simplified design it is never
// written to disk, so there is no ace-context.json to read back)

// Step 2: Build Gemini planning prompt with ACE context
const planningPrompt = `
PURPOSE: Analyze discovery prompt and create exploration strategy based on codebase context
TASK:
• Parse user intent from prompt: "${prompt}"
• Use codebase context to identify specific modules and files to explore
• Create exploration dimensions with precise search targets
• Define comparison matrix structure (if comparison intent)
• Set success criteria and iteration strategy
MODE: analysis
CONTEXT: @${scope || '**/*'} | Discovery type: ${intentType}

## Codebase Context (from ACE semantic search)
${JSON.stringify(aceContext, null, 2)}

EXPECTED: JSON exploration plan following exploration-plan-schema.json:
{
  "intent_analysis": { "type": "${intentType}", "primary_question": "...", "sub_questions": [...] },
  "dimensions": [{ "name": "...", "description": "...", "search_targets": [...], "focus_areas": [...], "agent_prompt": "..." }],
  "comparison_matrix": { "dimension_a": "...", "dimension_b": "...", "comparison_points": [...] },
  "success_criteria": [...],
  "estimated_iterations": N,
  "termination_conditions": [...]
}
CONSTRAINTS: Use ACE context to inform targets | Focus on actionable plan
`;

// Step 3: Execute Gemini planning in the foreground and capture its output
// (Step 4 parses the result immediately, so background execution would not work here)
const geminiResult = Bash({
  command: `ccw cli -p "${planningPrompt}" --tool gemini --mode analysis`,
  timeout: 300000
});

// Step 4: Parse Gemini output and validate against schema
const explorationPlan = await parseGeminiPlanOutput(geminiResult);
validateAgainstSchema(explorationPlan, 'exploration-plan-schema.json');

// Step 5: Enhance plan with ACE-discovered file paths
explorationPlan.dimensions = explorationPlan.dimensions.map(dim => ({
  ...dim,
  ace_suggested_files: aceContext.relevant_modules
    .filter(m => m.relevance_to === dim.name)
    .map(m => m.file_path)
}));

// Step 6: Update state (plan kept in memory, not persisted)
await updateDiscoveryState(outputDir, {
  phase: 'planned',
  exploration_plan: {
    dimensions_count: explorationPlan.dimensions.length,
    has_comparison_matrix: !!explorationPlan.comparison_matrix,
    estimated_iterations: explorationPlan.estimated_iterations
  }
});

// explorationPlan passed to Phase 3 in memory
```

**Gemini Planning Responsibilities**:

| Responsibility | Input | Output |
|----------------|-------|--------|
| Intent Analysis | User prompt | type, primary_question, sub_questions |
| Dimension Design | ACE context + prompt | dimensions with search_targets |
| Comparison Matrix | Intent type + modules | comparison_points (if applicable) |
| Iteration Strategy | Depth setting | estimated_iterations, termination_conditions |

**Gemini Planning Output Schema**:

```json
{
  "intent_analysis": {
    "type": "comparison|search|verification|audit",
    "primary_question": "string",
    "sub_questions": ["string"]
  },
  "dimensions": [
    {
      "name": "frontend",
      "description": "Client-side API calls and error handling",
      "search_targets": ["src/api/**", "src/hooks/**"],
      "focus_areas": ["fetch calls", "error boundaries", "response parsing"],
      "agent_prompt": "Explore frontend API consumption patterns..."
    },
    {
      "name": "backend",
      "description": "Server-side API implementations",
      "search_targets": ["src/server/**", "src/routes/**"],
      "focus_areas": ["endpoint handlers", "response schemas", "error responses"],
      "agent_prompt": "Explore backend API implementations..."
    }
  ],
  "comparison_matrix": {
    "dimension_a": "frontend",
    "dimension_b": "backend",
    "comparison_points": [
      {"aspect": "endpoints", "frontend_check": "fetch URLs", "backend_check": "route paths"},
      {"aspect": "methods", "frontend_check": "HTTP methods used", "backend_check": "methods accepted"},
      {"aspect": "payloads", "frontend_check": "request body structure", "backend_check": "expected schema"},
      {"aspect": "responses", "frontend_check": "response parsing", "backend_check": "response format"},
      {"aspect": "errors", "frontend_check": "error handling", "backend_check": "error responses"}
    ]
  },
  "success_criteria": [
    "All API endpoints mapped between frontend and backend",
    "Discrepancies identified with file:line references",
    "Each finding includes remediation suggestion"
  ],
  "estimated_iterations": 3,
  "termination_conditions": [
    "All comparison points verified",
    "No new findings in last iteration",
    "Confidence > 0.8 on primary question"
  ]
}
```
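
For the `validateAgainstSchema` call in Step 4, any JSON Schema validator will do; a minimal sketch using Ajv, where the library choice and schema directory are assumptions:

```javascript
// Minimal sketch of validateAgainstSchema using Ajv (library choice is an
// assumption; schema files are assumed to live under cli-templates/schemas/).
const Ajv = require('ajv');
const fs = require('fs');
const path = require('path');
const os = require('os');

function validateAgainstSchema(data, schemaFile) {
  const schemaPath = path.join(os.homedir(), '.claude/workflows/cli-templates/schemas', schemaFile);
  const schema = JSON.parse(fs.readFileSync(schemaPath, 'utf8'));
  const validate = new Ajv().compile(schema);
  if (!validate(data)) {
    throw new Error(`Plan failed schema validation: ${JSON.stringify(validate.errors, null, 2)}`);
  }
}
```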

### Phase 3: Iterative Agent Exploration (with ACE)

**Purpose**: Multi-agent iterative exploration using ACE for semantic search within each iteration.

```javascript
let iteration = 0;
let cumulativeFindings = [];
let sharedContext = { aceDiscoveries: [], crossReferences: [] };
let shouldContinue = true;

while (shouldContinue && iteration < maxIterations) {
  iteration++;
  const iterationDir = `${outputDir}/iterations/${iteration}`;
  await mkdir(iterationDir, { recursive: true });

  // Step 1: ACE-assisted iteration planning
  // Use previous findings to guide ACE queries for this iteration
  const iterationAceQueries = iteration === 1
    ? explorationPlan.dimensions.map(d => d.focus_areas[0]) // initial queries from plan
    : deriveQueriesFromFindings(cumulativeFindings);        // follow-up queries from findings

  // Execute ACE searches to find related code
  const iterationAceResults = [];
  for (const query of iterationAceQueries) {
    const result = await mcp__ace-tool__search_context({
      project_root_path: process.cwd(),
      query: `${query} in ${explorationPlan.scope}`
    });
    iterationAceResults.push({ query, result });
  }

  // Update shared context with ACE discoveries
  sharedContext.aceDiscoveries.push(...iterationAceResults);

  // Step 2: Plan this iteration based on ACE results
  const iterationPlan = planIteration(iteration, explorationPlan, cumulativeFindings, iterationAceResults);

  // Step 3: Launch dimension agents with ACE context
  const agentPromises = iterationPlan.dimensions.map(dimension =>
    Task({
      subagent_type: "cli-explore-agent",
      run_in_background: false,
      description: `Explore ${dimension.name} (iteration ${iteration})`,
      prompt: buildDimensionPromptWithACE(dimension, iteration, cumulativeFindings, iterationAceResults, iterationDir)
    })
  );

  // Wait for iteration agents
  const iterationResults = await Promise.all(agentPromises);

  // Step 4: Collect and analyze iteration findings
  const iterationFindings = await collectIterationFindings(iterationDir, iterationPlan.dimensions);

  // Step 5: Cross-reference findings between dimensions
  if (iterationPlan.dimensions.length > 1) {
    const crossRefs = findCrossReferences(iterationFindings, iterationPlan.dimensions);
    sharedContext.crossReferences.push(...crossRefs);
  }

  cumulativeFindings.push(...iterationFindings);

  // Step 6: Decide whether to continue
  const convergenceCheck = checkConvergence(iterationFindings, cumulativeFindings, explorationPlan);
  shouldContinue = !convergenceCheck.converged;

  // Step 7: Update state (iteration summary embedded in state)
  const state = await readJson(`${outputDir}/discovery-state.json`);
  await updateDiscoveryState(outputDir, {
    iterations: [...state.iterations, {
      number: iteration,
      findings_count: iterationFindings.length,
      ace_queries: iterationAceQueries.length,
      cross_references: sharedContext.crossReferences.length,
      new_discoveries: convergenceCheck.newDiscoveries,
      confidence: convergenceCheck.confidence,
      continued: shouldContinue
    }],
    cumulative_findings: cumulativeFindings
  });
}
```
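
The loop's termination hinges on `checkConvergence`, which is not defined above. A minimal sketch, where the thresholds and the definition of a "new" finding are assumptions mirroring the `termination_conditions` example earlier:

```javascript
// Minimal sketch of checkConvergence - thresholds are illustrative assumptions.
function checkConvergence(iterationFindings, cumulativeFindings, plan) {
  // A finding is "new" if no earlier finding shares its file:line location
  const seen = new Set(
    cumulativeFindings
      .filter(f => !iterationFindings.includes(f)) // findings from earlier iterations
      .map(f => `${f.file}:${f.line}`)
  );
  const newDiscoveries = iterationFindings.filter(f => !seen.has(`${f.file}:${f.line}`)).length;

  // Confidence: average of per-finding confidence scores (0 when nothing found)
  const confidence = cumulativeFindings.length
    ? cumulativeFindings.reduce((s, f) => s + (f.confidence || 0), 0) / cumulativeFindings.length
    : 0;

  // Converged when an iteration adds nothing new, or confidence clears the
  // 0.8 bar used in the termination_conditions example above
  return { converged: newDiscoveries === 0 || confidence > 0.8, newDiscoveries, confidence };
}
```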

**ACE in Iteration Loop**:

```
Iteration N
│
├─→ ACE Search (based on previous findings)
│     └─ Query: "related code paths for {finding.category}"
│     └─ Result: Additional files to explore
│
├─→ Agent Exploration (with ACE context)
│     └─ Agent receives: dimension targets + ACE suggestions
│     └─ Agent can call ACE for deeper search
│
├─→ Cross-Reference Analysis
│     └─ Compare findings between dimensions
│     └─ Identify discrepancies
│
└─→ Convergence Check
      └─ New findings? Continue
      └─ No new findings? Terminate
```

**Dimension Agent Prompt Template (with ACE)**:

```javascript
function buildDimensionPromptWithACE(dimension, iteration, previousFindings, aceResults, outputDir) {
  // Filter ACE results relevant to this dimension
  const relevantAceResults = aceResults.filter(r =>
    r.query.includes(dimension.name) || dimension.focus_areas.some(fa => r.query.includes(fa))
  );

  return `
## Task Objective
Explore ${dimension.name} dimension for issue discovery (Iteration ${iteration})

## Context
- Dimension: ${dimension.name}
- Description: ${dimension.description}
- Search Targets: ${dimension.search_targets.join(', ')}
- Focus Areas: ${dimension.focus_areas.join(', ')}

## ACE Semantic Search Results (Pre-gathered)
The following files/code sections were identified by ACE as relevant to this dimension:
${JSON.stringify(relevantAceResults.map(r => ({ query: r.query, files: r.result.slice(0, 5) })), null, 2)}

**Use ACE for deeper exploration**: You have access to mcp__ace-tool__search_context.
When you find something interesting, use ACE to find related code:
- mcp__ace-tool__search_context({ project_root_path: ".", query: "related to {finding}" })

${iteration > 1 ? `
## Previous Findings to Build Upon
${summarizePreviousFindings(previousFindings, dimension.name)}

## This Iteration Focus
- Explore areas not yet covered (check ACE results for new files)
- Verify/deepen previous findings
- Follow leads from previous discoveries
- Use ACE to find cross-references between dimensions
` : ''}

## MANDATORY FIRST STEPS
1. Review the dimension context and ACE results above (the exploration plan is held in memory, not written to disk)
2. Read schema: ~/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json
3. Explore the files identified by ACE

## Exploration Instructions
${dimension.agent_prompt}

## ACE Usage Guidelines
- Use ACE when you need to find:
  - Where a function/class is used
  - Related implementations in other modules
  - Cross-module dependencies
  - Similar patterns elsewhere in the codebase
- Query format: Natural language, be specific
- Example: "Where is UserService.authenticate called from?"

## Output Requirements

**1. Write JSON file**: ${outputDir}/${dimension.name}.json
Follow discovery-finding-schema.json:
- findings: [{id, title, category, description, file, line, snippet, confidence, related_dimension}]
- coverage: {files_explored, areas_covered, areas_remaining}
- leads: [{description, suggested_search}] // for next iteration
- ace_queries_used: [{query, result_count}] // track ACE usage

**2. Return summary**:
- Total findings this iteration
- Key discoveries
- ACE queries that revealed important code
- Recommended next exploration areas

## Success Criteria
- [ ] JSON written to ${outputDir}/${dimension.name}.json
- [ ] Each finding has file:line reference
- [ ] ACE used for cross-references where applicable
- [ ] Coverage report included
- [ ] Leads for next iteration identified
`;
}
```

### Phase 4: Cross-Analysis & Synthesis

```javascript
// For comparison intent, perform cross-analysis
// (declared with let so Phase 5 can reference it; null when not applicable)
let comparisonResults = null;
if (intentType === 'comparison' && explorationPlan.comparison_matrix) {
  comparisonResults = [];

  for (const point of explorationPlan.comparison_matrix.comparison_points) {
    const dimensionAFindings = cumulativeFindings.filter(f =>
      f.related_dimension === explorationPlan.comparison_matrix.dimension_a &&
      f.category.includes(point.aspect)
    );

    const dimensionBFindings = cumulativeFindings.filter(f =>
      f.related_dimension === explorationPlan.comparison_matrix.dimension_b &&
      f.category.includes(point.aspect)
    );

    // Compare and find discrepancies
    const discrepancies = findDiscrepancies(dimensionAFindings, dimensionBFindings, point);

    comparisonResults.push({
      aspect: point.aspect,
      dimension_a_count: dimensionAFindings.length,
      dimension_b_count: dimensionBFindings.length,
      discrepancies: discrepancies,
      match_rate: calculateMatchRate(dimensionAFindings, dimensionBFindings)
    });
  }

  // Write comparison analysis
  await writeJson(`${outputDir}/comparison-analysis.json`, {
    matrix: explorationPlan.comparison_matrix,
    results: comparisonResults,
    summary: {
      total_discrepancies: comparisonResults.reduce((sum, r) => sum + r.discrepancies.length, 0),
      overall_match_rate: average(comparisonResults.map(r => r.match_rate)),
      critical_mismatches: comparisonResults.filter(r => r.match_rate < 0.5)
    }
  });
}

// Prioritize all findings
const prioritizedFindings = prioritizeFindings(cumulativeFindings, explorationPlan);
```

### Phase 5: Issue Generation & Summary

```javascript
// Convert high-confidence findings to issues
const issueWorthy = prioritizedFindings.filter(f =>
  f.confidence >= 0.7 || f.priority === 'critical' || f.priority === 'high'
);

const issues = issueWorthy.map(finding => ({
  id: `ISS-${discoveryId}-${finding.id}`,
  title: finding.title,
  description: finding.description,
  source: {
    discovery_id: discoveryId,
    finding_id: finding.id,
    dimension: finding.related_dimension
  },
  file: finding.file,
  line: finding.line,
  priority: finding.priority,
  category: finding.category,
  suggested_fix: finding.suggested_fix,
  confidence: finding.confidence,
  status: 'discovered',
  created_at: new Date().toISOString()
}));

// Write issues
await writeJsonl(`${outputDir}/discovery-issues.jsonl`, issues);

// Update final state (summary embedded in state, no separate file)
await updateDiscoveryState(outputDir, {
  phase: 'complete',
  updated_at: new Date().toISOString(),
  results: {
    total_iterations: iteration,
    total_findings: cumulativeFindings.length,
    issues_generated: issues.length,
    comparison_match_rate: comparisonResults
      ? average(comparisonResults.map(r => r.match_rate))
      : null
  }
});

// Prompt user for next action
await AskUserQuestion({
  questions: [{
    question: `Discovery complete: ${issues.length} issues from ${cumulativeFindings.length} findings across ${iteration} iterations. What next?`,
    header: "Next Step",
    multiSelect: false,
    options: [
      { label: "Export to Issues (Recommended)", description: `Export ${issues.length} issues for planning` },
      { label: "Review Details", description: "View comparison analysis and iteration details" },
      { label: "Run Deeper", description: "Continue with more iterations" },
      { label: "Skip", description: "Complete without exporting" }
    ]
  }]
});
```

## Output File Structure

```
.workflow/issues/discoveries/
└── {DBP-YYYYMMDD-HHmmss}/
    ├── discovery-state.json        # Session state with iteration tracking
    ├── iterations/
    │   ├── 1/
    │   │   └── {dimension}.json    # Dimension findings
    │   ├── 2/
    │   │   └── {dimension}.json
    │   └── ...
    ├── comparison-analysis.json    # Cross-dimension comparison (if applicable)
    └── discovery-issues.jsonl      # Generated issue candidates
```

**Simplified Design**:
- ACE context and Gemini plan kept in memory, not persisted
- Iteration summaries embedded in state
- No separate summary.md (discovery-state.json contains all needed info)

## Schema References

| Schema | Path | Used By |
|--------|------|---------|
| **Discovery State** | `discovery-state-schema.json` | Orchestrator (state tracking) |
| **Discovery Finding** | `discovery-finding-schema.json` | Dimension agents (output) |
| **Exploration Plan** | `exploration-plan-schema.json` | Gemini output validation (memory only) |

## Configuration Options

| Flag | Default | Description |
|------|---------|-------------|
| `--scope` | `**/*` | File pattern to explore |
| `--depth` | `standard` | `standard` (3 iterations) or `deep` (5+ iterations) |
| `--max-iterations` | 5 | Maximum exploration iterations |
| `--tool` | `gemini` | Planning tool (gemini/qwen) |
| `--plan-only` | `false` | Stop after Phase 2 (Gemini planning), show plan for user review |

## Examples

### Example 1: Single Module Deep Dive

```bash
/issue:discover-by-prompt "Find all potential issues in the auth module" --scope=src/auth/**
```

**Gemini plans** (single dimension):
- Dimension: auth-module
- Focus: security vulnerabilities, edge cases, error handling, test gaps

**Iterations**: 2-3 (until no new findings)

### Example 2: API Contract Comparison

```bash
/issue:discover-by-prompt "Check if API calls match implementations" --scope=src/**
```

**Gemini plans** (comparison):
- Dimension 1: api-consumers (fetch calls, hooks, services)
- Dimension 2: api-providers (handlers, routes, controllers)
- Comparison matrix: endpoints, methods, payloads, responses

### Example 3: Multi-Module Audit

```bash
/issue:discover-by-prompt "Audit the payment flow for issues" --scope=src/payment/**
```

**Gemini plans** (multi-dimension):
- Dimension 1: payment-logic (calculations, state transitions)
- Dimension 2: validation (input checks, business rules)
- Dimension 3: error-handling (failure modes, recovery)

### Example 4: Plan Only Mode

```bash
/issue:discover-by-prompt "Find inconsistent patterns" --plan-only
```

Stops after Gemini planning and outputs:
```
Gemini Plan:
- Intent: search
- Dimensions: 2 (pattern-definitions, pattern-usages)
- Estimated iterations: 3

Continue with exploration? [Y/n]
```

## Related Commands

```bash
# After discovery, plan solutions
/issue:plan DBP-001-01,DBP-001-02

# View all discoveries
/issue:manage

# Standard perspective-based discovery
/issue:discover src/auth/** --perspectives=security,bug
```

## Best Practices

1. **Be Specific in Prompts**: More specific prompts lead to better Gemini planning
2. **Scope Appropriately**: Narrow the scope for focused comparison; widen it for audits
3. **Review the Plan First**: Use `--plan-only` to inspect the Gemini plan before long explorations (the plan is held in memory and never written to disk)
4. **Use Standard Depth First**: Start with standard depth; go deep only if needed
5. **Combine with `/issue:discover`**: Use prompt-based discovery for comparisons, perspective-based for audits
472  .claude/commands/issue/discover.md  (Normal file)
@@ -0,0 +1,472 @@

---
name: issue:discover
description: Discover potential issues from multiple perspectives (bug, UX, test, quality, security, performance, maintainability, best-practices) using CLI explore. Supports Exa external research for security and best-practices perspectives.
argument-hint: "[-y|--yes] <path-pattern> [--perspectives=bug,ux,...] [--external]"
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*), Task(*), AskUserQuestion(*), Glob(*), Grep(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-select all perspectives, skip confirmations.

# Issue Discovery Command

## Quick Start

```bash
# Discover issues in specific module (interactive perspective selection)
/issue:discover src/auth/**

# Discover with specific perspectives
/issue:discover src/payment/** --perspectives=bug,security,test

# Discover with external research for all perspectives
/issue:discover src/api/** --external

# Discover in multiple modules
/issue:discover src/auth/**,src/payment/**
```

**Discovery Scope**: Specified modules/files only
**Output Directory**: `.workflow/issues/discoveries/{discovery-id}/`
**Available Perspectives**: bug, ux, test, quality, security, performance, maintainability, best-practices
**Exa Integration**: Auto-enabled for security and best-practices perspectives
**CLI Tools**: Gemini → Qwen → Codex (fallback chain)

## What & Why

### Core Concept
Multi-perspective issue discovery orchestrator that explores code from different angles to identify potential bugs, UX improvements, test gaps, and other actionable items. Unlike code review (which assesses existing code quality), discovery focuses on **finding opportunities for improvement and potential problems**.

**vs Code Review**:
- **Code Review** (`review-module-cycle`): Evaluates code quality against standards
- **Issue Discovery** (`issue:discover`): Finds actionable issues, bugs, and improvement opportunities

### Value Proposition
1. **Proactive Issue Detection**: Find problems before they become bugs
2. **Multi-Perspective Analysis**: Each perspective surfaces different types of issues
3. **External Benchmarking**: Compare against industry best practices via Exa
4. **Direct Issue Integration**: Discoveries can be exported to the issue tracker
5. **Dashboard Management**: View, filter, and export discoveries via the CCW dashboard

## How It Works

### Execution Flow

```
Phase 1: Discovery & Initialization
└─ Parse target pattern, create session, initialize output structure

Phase 2: Interactive Perspective Selection
└─ AskUserQuestion for perspective selection (or use --perspectives)

Phase 3: Parallel Perspective Analysis
├─ Launch N @cli-explore-agent instances (one per perspective)
├─ Security & Best-Practices auto-trigger Exa research
├─ Agent writes perspective JSON, returns summary
└─ Update discovery-state.json

Phase 4: Aggregation & Prioritization
├─ Collect agent return summaries
├─ Load perspective JSON files
├─ Merge findings, deduplicate by file+line
└─ Calculate priority scores

Phase 5: Issue Generation & Summary
├─ Convert high-priority discoveries to issue format
├─ Write to discovery-issues.jsonl
├─ Generate single summary.md from agent returns
└─ Update discovery-state.json to complete

Phase 6: User Action Prompt
└─ AskUserQuestion for next step (export/dashboard/skip)
```

## Perspectives

### Available Perspectives

| Perspective | Focus | Categories | Exa |
|-------------|-------|------------|-----|
| **bug** | Potential Bugs | edge-case, null-check, resource-leak, race-condition, boundary, exception-handling | - |
| **ux** | User Experience | error-message, loading-state, feedback, accessibility, interaction, consistency | - |
| **test** | Test Coverage | missing-test, edge-case-test, integration-gap, coverage-hole, assertion-quality | - |
| **quality** | Code Quality | complexity, duplication, naming, documentation, code-smell, readability | - |
| **security** | Security Issues | injection, auth, encryption, input-validation, data-exposure, access-control | ✓ |
| **performance** | Performance | n-plus-one, memory-usage, caching, algorithm, blocking-operation, resource | - |
| **maintainability** | Maintainability | coupling, cohesion, tech-debt, extensibility, module-boundary, interface-design | - |
| **best-practices** | Best Practices | convention, pattern, framework-usage, anti-pattern, industry-standard | ✓ |
### Interactive Perspective Selection

When no `--perspectives` flag is provided, the command uses AskUserQuestion:

```javascript
AskUserQuestion({
  questions: [{
    question: "Select primary discovery focus:",
    header: "Focus",
    multiSelect: false,
    options: [
      { label: "Bug + Test + Quality", description: "Quick scan: potential bugs, test gaps, code quality (Recommended)" },
      { label: "Security + Performance", description: "System audit: security issues, performance bottlenecks" },
      { label: "Maintainability + Best-practices", description: "Long-term health: coupling, tech debt, conventions" },
      { label: "Full analysis", description: "All 8 perspectives (comprehensive, takes longer)" }
    ]
  }]
})
```

**Recommended Combinations**:
- Quick scan: bug, test, quality
- Full analysis: all perspectives
- Security audit: security, bug, quality

## Core Responsibilities

### Orchestrator

**Phase 1: Discovery & Initialization**

```javascript
// Step 1: Parse target pattern and resolve files
const resolvedFiles = await expandGlobPattern(targetPattern);
if (resolvedFiles.length === 0) {
  throw new Error(`No files matched pattern: ${targetPattern}`);
}

// Step 2: Generate discovery ID
const discoveryId = `DSC-${formatDate(new Date(), 'YYYYMMDD-HHmmss')}`;

// Step 3: Create output directory
const outputDir = `.workflow/issues/discoveries/${discoveryId}`;
await mkdir(outputDir, { recursive: true });
await mkdir(`${outputDir}/perspectives`, { recursive: true });

// Step 4: Initialize unified discovery state (merged state+progress)
await writeJson(`${outputDir}/discovery-state.json`, {
  discovery_id: discoveryId,
  target_pattern: targetPattern,
  phase: "initialization",
  created_at: new Date().toISOString(),
  updated_at: new Date().toISOString(),
  target: { files_count: { total: resolvedFiles.length }, project: {} },
  perspectives: [], // filled after selection: [{name, status, findings}]
  external_research: { enabled: false, completed: false },
  results: { total_findings: 0, issues_generated: 0, priority_distribution: {} }
});
```

**Phase 2: Perspective Selection**

```javascript
// Check for --perspectives flag
let selectedPerspectives = [];

if (args.perspectives) {
  selectedPerspectives = args.perspectives.split(',').map(p => p.trim());
} else {
  // Interactive selection via AskUserQuestion
  const response = await AskUserQuestion({...});
  selectedPerspectives = parseSelectedPerspectives(response);
}

// Validate and update state (perspectives live at the top level of
// discovery-state.json, per the initialization above)
await updateDiscoveryState(outputDir, {
  perspectives: selectedPerspectives.map(name => ({ name, status: 'pending' })),
  phase: 'parallel'
});
```
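
`updateDiscoveryState` is used throughout both discovery commands with dot-path keys such as `'results.total_findings'`. A minimal sketch of such a helper, where the dot-path merge behavior is an assumption inferred from its call sites:

```javascript
// Minimal sketch of updateDiscoveryState - dot-path merge is an assumption;
// readJson/writeJson are the same helpers used elsewhere in this document.
async function updateDiscoveryState(outputDir, patch) {
  const statePath = `${outputDir}/discovery-state.json`;
  const state = await readJson(statePath);
  for (const [key, value] of Object.entries(patch)) {
    // Walk dot-path segments, creating intermediate objects as needed
    const parts = key.split('.');
    let node = state;
    while (parts.length > 1) node = node[parts.shift()] ??= {};
    node[parts[0]] = value;
  }
  state.updated_at = new Date().toISOString();
  await writeJson(statePath, state);
}
```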

**Phase 3: Parallel Perspective Analysis**

Launch N agents in parallel (one per selected perspective):

```javascript
// Launch agents in parallel - agents write JSON and return summary
const agentPromises = selectedPerspectives.map(perspective =>
  Task({
    subagent_type: "cli-explore-agent",
    run_in_background: false,
    description: `Discover ${perspective} issues`,
    prompt: buildPerspectivePrompt(perspective, discoveryId, resolvedFiles, outputDir)
  })
);

// Wait for all agents - collect their return summaries
const results = await Promise.all(agentPromises);
// results contain agent summaries for the final report
```

**Phase 4: Aggregation & Prioritization**

```javascript
// Load all perspective JSON files written by agents
const allFindings = [];
for (const perspective of selectedPerspectives) {
  const jsonPath = `${outputDir}/perspectives/${perspective}.json`;
  if (await fileExists(jsonPath)) {
    const data = await readJson(jsonPath);
    allFindings.push(...data.findings.map(f => ({ ...f, perspective })));
  }
}

// Deduplicate and prioritize
const prioritizedFindings = deduplicateAndPrioritize(allFindings);

// Update unified state
await updateDiscoveryState(outputDir, {
  phase: 'aggregation',
  'results.total_findings': prioritizedFindings.length,
  'results.priority_distribution': countByPriority(prioritizedFindings)
});
```
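
A minimal sketch of `deduplicateAndPrioritize`, deduplicating on `file:line` as the execution flow describes; the dedupe tie-break and score weights are illustrative assumptions:

```javascript
// Minimal sketch of deduplicateAndPrioritize - weights are assumptions.
const PRIORITY_WEIGHT = { critical: 1.0, high: 0.8, medium: 0.5, low: 0.2 };

function deduplicateAndPrioritize(findings) {
  const byLocation = new Map();
  for (const f of findings) {
    const key = `${f.file}:${f.line}`;
    const existing = byLocation.get(key);
    // On collision keep the higher-priority finding, noting every perspective
    if (!existing || PRIORITY_WEIGHT[f.priority] > PRIORITY_WEIGHT[existing.priority]) {
      byLocation.set(key, { ...f, perspectives: [...(existing?.perspectives || []), f.perspective] });
    } else {
      existing.perspectives = [...(existing.perspectives || []), f.perspective];
    }
  }
  // priority_score blends severity with the agent's confidence
  return [...byLocation.values()]
    .map(f => ({ ...f, priority_score: PRIORITY_WEIGHT[f.priority] * (f.confidence ?? 1) }))
    .sort((a, b) => b.priority_score - a.priority_score);
}
```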

**Phase 5: Issue Generation & Summary**

```javascript
// Convert high-priority findings to issues
const issueWorthy = prioritizedFindings.filter(f =>
  f.priority === 'critical' || f.priority === 'high' || f.priority_score >= 0.7
);
// Conversion mirrors the issue shape shown in discover-by-prompt Phase 5
const issues = issueWorthy.map(f => toIssueFormat(f, discoveryId));

// Write discovery-issues.jsonl
await writeJsonl(`${outputDir}/discovery-issues.jsonl`, issues);

// Generate single summary.md from agent return summaries
// Orchestrator briefly summarizes what agents returned (NO detailed reports)
await writeSummaryFromAgentReturns(outputDir, results, prioritizedFindings, issues);

// Update final state
await updateDiscoveryState(outputDir, {
  phase: 'complete',
  updated_at: new Date().toISOString(),
  'results.issues_generated': issues.length
});
```

**Phase 6: User Action Prompt**

```javascript
// Prompt user for next action based on discovery results
const hasHighPriority = issues.some(i => i.priority === 'critical' || i.priority === 'high');
const hasMediumFindings = prioritizedFindings.some(f => f.priority === 'medium');

const answer = await AskUserQuestion({
  questions: [{
    question: `Discovery complete: ${issues.length} issues generated, ${prioritizedFindings.length} total findings. What would you like to do next?`,
    header: "Next Step",
    multiSelect: false,
    options: hasHighPriority ? [
      { label: "Export to Issues (Recommended)", description: `${issues.length} high-priority issues found - export to issue tracker for planning` },
      { label: "Open Dashboard", description: "Review findings in ccw view before exporting" },
      { label: "Skip", description: "Complete discovery without exporting" }
    ] : hasMediumFindings ? [
      { label: "Open Dashboard (Recommended)", description: "Review medium-priority findings in ccw view to decide which to export" },
      { label: "Export to Issues", description: `Export ${issues.length} issues to tracker` },
      { label: "Skip", description: "Complete discovery without exporting" }
    ] : [
      { label: "Skip (Recommended)", description: "No significant issues found - complete discovery" },
      { label: "Open Dashboard", description: "Review all findings in ccw view" },
      { label: "Export to Issues", description: `Export ${issues.length} issues anyway` }
    ]
  }]
});

// Handle response (labels may carry a "(Recommended)" suffix, so match by prefix)
const response = answer['Next Step'];
if (response.startsWith('Export to Issues')) {
  // Append to issues.jsonl
  await appendJsonl('.workflow/issues/issues.jsonl', issues);
  console.log(`Exported ${issues.length} issues. Run /issue:plan to continue.`);
} else if (response.startsWith('Open Dashboard')) {
  console.log('Run `ccw view` and navigate to Issues > Discovery to manage findings.');
}
```

### Output File Structure

```
.workflow/issues/discoveries/
├── index.json                     # Discovery session index
└── {discovery-id}/
    ├── discovery-state.json       # Unified state (merged state+progress)
    ├── perspectives/
    │   └── {perspective}.json     # Per-perspective findings
    ├── external-research.json     # Exa research results (if enabled)
    ├── discovery-issues.jsonl     # Generated candidate issues
    └── summary.md                 # Single summary (from agent returns)
```

### Schema References

**External Schema Files** (agent MUST read and follow exactly):

| Schema | Path | Purpose |
|--------|------|---------|
| **Discovery State** | `~/.claude/workflows/cli-templates/schemas/discovery-state-schema.json` | Session state machine |
| **Discovery Finding** | `~/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json` | Perspective analysis results |

### Agent Invocation Template

**Perspective Analysis Agent**:

```javascript
Task({
  subagent_type: "cli-explore-agent",
  run_in_background: false,
  description: `Discover ${perspective} issues`,
  prompt: `
## Task Objective
Discover potential ${perspective} issues in specified module files.

## Discovery Context
- Discovery ID: ${discoveryId}
- Perspective: ${perspective}
- Target Pattern: ${targetPattern}
- Resolved Files: ${resolvedFiles.length} files
- Output Directory: ${outputDir}

## MANDATORY FIRST STEPS
1. Read discovery state: ${outputDir}/discovery-state.json
2. Read schema: ~/.claude/workflows/cli-templates/schemas/discovery-finding-schema.json
3. Analyze target files for ${perspective} concerns

## Output Requirements

**1. Write JSON file**: ${outputDir}/perspectives/${perspective}.json
- Follow discovery-finding-schema.json exactly
- Each finding: id, title, priority, category, description, file, line, snippet, suggested_issue, confidence

**2. Return summary** (DO NOT write report file):
- Return a brief text summary of findings
- Include: total findings, priority breakdown, key issues
- This summary will be used by the orchestrator for the final report

## Perspective-Specific Guidance
${getPerspectiveGuidance(perspective)}

## Success Criteria
- [ ] JSON written to ${outputDir}/perspectives/${perspective}.json
- [ ] Summary returned with findings count and key issues
- [ ] Each finding includes an actionable suggested_issue
- [ ] Priority uses lowercase enum: critical/high/medium/low
`
})
```

**Exa Research Agent** (for security and best-practices):

```javascript
Task({
  subagent_type: "cli-explore-agent",
  run_in_background: false,
  description: `External research for ${perspective} via Exa`,
  prompt: `
## Task Objective
Research industry best practices for ${perspective} using Exa search

## Research Steps
1. Read project tech stack: .workflow/project-tech.json
2. Use Exa to search for best practices
3. Synthesize findings relevant to this project

## Output Requirements

**1. Write JSON file**: ${outputDir}/external-research.json
- Include sources, key_findings, gap_analysis, recommendations

**2. Return summary** (DO NOT write report file):
- Brief summary of external research findings
- Key recommendations for the project

## Success Criteria
- [ ] JSON written to ${outputDir}/external-research.json
- [ ] Summary returned with key recommendations
- [ ] Findings are relevant to the project's tech stack
`
})
```

### Perspective Guidance Reference

```javascript
function getPerspectiveGuidance(perspective) {
  const guidance = {
    bug: `
Focus: Null checks, edge cases, resource leaks, race conditions, boundary conditions, exception handling
Priority: Critical=data corruption/crash, High=malfunction, Medium=edge case issues, Low=minor
`,
    ux: `
Focus: Error messages, loading states, feedback, accessibility, interaction patterns, form validation
Priority: Critical=inaccessible, High=confusing, Medium=inconsistent, Low=cosmetic
`,
    test: `
Focus: Missing unit tests, edge case coverage, integration gaps, assertion quality, test isolation
Priority: Critical=no security tests, High=no core logic tests, Medium=weak coverage, Low=minor gaps
`,
    quality: `
Focus: Complexity, duplication, naming, documentation, code smells, readability
Priority: Critical=unmaintainable, High=significant issues, Medium=naming/docs, Low=minor refactoring
`,
    security: `
Focus: Input validation, auth/authz, injection, XSS/CSRF, data exposure, access control
Priority: Critical=auth bypass/injection, High=missing authz, Medium=weak validation, Low=headers
`,
    performance: `
Focus: N+1 queries, memory leaks, caching, algorithm efficiency, blocking operations
Priority: Critical=memory leaks, High=N+1/inefficient, Medium=missing cache, Low=minor optimization
`,
    maintainability: `
Focus: Coupling, interface design, tech debt, extensibility, module boundaries, configuration
Priority: Critical=unrelated code changes, High=unclear boundaries, Medium=coupling, Low=refactoring
`,
    'best-practices': `
Focus: Framework conventions, language patterns, anti-patterns, deprecated APIs, coding standards
Priority: Critical=anti-patterns causing bugs, High=convention violations, Medium=style, Low=cosmetic
`
  };
  return guidance[perspective] || 'General code discovery analysis';
}
```

## Dashboard Integration

### Viewing Discoveries

Open the CCW dashboard to manage discoveries:

```bash
ccw view
```

Navigate to **Issues > Discovery** to:
- View all discovery sessions
- Filter findings by perspective and priority
- Preview finding details
- Select and export findings as issues

### Exporting to Issues

From the dashboard, select findings and click "Export as Issues" to:
1. Convert discoveries to standard issue format
2. Append to `.workflow/issues/issues.jsonl`
3. Set status to `registered`
4. Continue with the `/issue:plan` workflow

## Related Commands

```bash
# After discovery, plan solutions for exported issues
/issue:plan DSC-001,DSC-002,DSC-003

# Or use interactive management
/issue:manage
```

## Best Practices

1. **Start Focused**: Begin with specific modules rather than the entire codebase
2. **Use Quick Scan First**: Start with bug, test, quality for fast results
3. **Review Before Export**: Not all discoveries warrant issues - use the dashboard to filter
4. **Combine Perspectives**: Run related perspectives together (e.g., security + bug)
5. **Enable Exa for New Tech**: When using unfamiliar frameworks, enable external research
580  .claude/commands/issue/execute.md  (Normal file)
@@ -0,0 +1,580 @@

---
name: execute
description: Execute queue with DAG-based parallel orchestration (one commit per solution)
argument-hint: "[-y|--yes] --queue <queue-id> [--worktree [<existing-path>]]"
allowed-tools: TodoWrite(*), Bash(*), Read(*), AskUserQuestion(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-confirm execution, use recommended settings.

# Issue Execute Command (/issue:execute)

## Overview

Minimal orchestrator that dispatches **solution IDs** to executors. Each executor receives a complete solution with all its tasks.

**Design Principles:**
- `queue dag` → returns parallel batches with solution IDs (S-1, S-2, ...)
- `detail <id>` → READ-ONLY solution fetch (returns full solution with all tasks)
- `done <id>` → update solution completion status
- No race conditions: status changes only via `done`
- **Executor handles all tasks within a solution sequentially**
- **Single worktree for entire queue**: One worktree isolates ALL queue execution from the main workspace

## Queue ID Requirement (MANDATORY)

**Queue ID is REQUIRED.** You MUST specify which queue to execute via `--queue <queue-id>`.

### If Queue ID Not Provided

When the `--queue` parameter is missing, you MUST:

1. **List available queues** by running:
   ```javascript
   const result = Bash('ccw issue queue list --brief --json');
   const index = JSON.parse(result);
   ```

2. **Display available queues** to the user:
   ```
   Available Queues:
   ID                    Status      Progress    Issues
   -----------------------------------------------------------
   → QUE-20251215-001    active      3/10        ISS-001, ISS-002
     QUE-20251210-002    active      0/5         ISS-003
     QUE-20251205-003    completed   8/8         ISS-004
   ```

3. **Stop and ask the user** to specify which queue to execute:
   ```javascript
   AskUserQuestion({
     questions: [{
       question: "Which queue would you like to execute?",
       header: "Queue",
       multiSelect: false,
       options: index.queues
         .filter(q => q.status === 'active')
         .map(q => ({
           label: q.id,
           description: `${q.status}, ${q.completed_solutions || 0}/${q.total_solutions || 0} completed, Issues: ${q.issue_ids.join(', ')}`
         }))
     }]
   })
   ```

4. **After user selection**, continue execution with the selected queue ID.

**DO NOT auto-select queues.** Explicit user confirmation is required to prevent accidental execution of the wrong queue.

## Usage

```bash
/issue:execute --queue QUE-xxx                                        # Execute specific queue (REQUIRED)
/issue:execute --queue QUE-xxx --worktree                             # Execute in isolated worktree
/issue:execute --queue QUE-xxx --worktree /path/to/existing/worktree  # Resume
```

**Parallelism**: Determined automatically by the task dependency DAG (no manual control)
**Executor & Dry-run**: Selected via interactive prompt (AskUserQuestion)
**Worktree**: Creates ONE worktree for the entire queue execution (not per-solution)

**⭐ Recommended Executor**: **Codex** - Best for long-running autonomous work (2hr timeout), supports background execution and full write access

**Worktree Options**:
- `--worktree` - Create a new worktree with a timestamp-based name
- `--worktree <existing-path>` - Resume in an existing worktree (for recovery/continuation)

**Resume**: Use `git worktree list` to find existing worktrees from interrupted executions

## Execution Flow

```
Phase 0: Validate Queue ID (REQUIRED)
├─ If --queue provided → use specified queue
├─ If --queue missing → list queues, prompt user to select
└─ Store QUEUE_ID for all subsequent commands

Phase 0.5 (if --worktree): Setup Queue Worktree
├─ Create ONE worktree for entire queue: .ccw/worktrees/queue-<timestamp>
├─ All subsequent execution happens in this worktree
└─ Main workspace remains clean and untouched

Phase 1: Get DAG & User Selection
├─ ccw issue queue dag --queue ${QUEUE_ID} → { parallel_batches: [["S-1","S-2"], ["S-3"]] }
└─ AskUserQuestion → executor type (codex|gemini|agent), dry-run mode, worktree mode

Phase 2: Dispatch Parallel Batch (DAG-driven)
├─ Parallelism determined by DAG (no manual limit)
├─ All executors work in the SAME worktree (or main if no worktree)
├─ For each solution ID in batch (parallel - all at once):
│   ├─ Executor calls: ccw issue detail <id> (READ-ONLY)
│   ├─ Executor gets FULL SOLUTION with all tasks
│   ├─ Executor implements all tasks sequentially (T1 → T2 → T3)
│   ├─ Executor tests + verifies each task
│   ├─ Executor commits ONCE per solution (with formatted summary)
│   └─ Executor calls: ccw issue done <id>
└─ Wait for batch completion

Phase 3: Next Batch (repeat Phase 2)
└─ ccw issue queue dag → check for newly-ready solutions

Phase 4 (if --worktree): Worktree Completion
├─ All batches complete → prompt for merge strategy
└─ Options: Create PR / Merge to main / Keep branch
```

## Implementation

### Phase 0: Validate Queue ID

```javascript
// Check if --queue was provided
let QUEUE_ID = args.queue;

if (!QUEUE_ID) {
  // List available queues
  const listResult = Bash('ccw issue queue list --brief --json').trim();
  const index = JSON.parse(listResult);

  if (index.queues.length === 0) {
    console.log('No queues found. Use /issue:queue to create one first.');
    return;
  }

  // Filter active queues only
  const activeQueues = index.queues.filter(q => q.status === 'active');

  if (activeQueues.length === 0) {
    console.log('No active queues found.');
    console.log('Available queues:', index.queues.map(q => `${q.id} (${q.status})`).join(', '));
    return;
  }

  // Display and prompt user
  console.log('\nAvailable Queues:');
  console.log('ID'.padEnd(22) + 'Status'.padEnd(12) + 'Progress'.padEnd(12) + 'Issues');
  console.log('-'.repeat(70));
  for (const q of index.queues) {
    const marker = q.id === index.active_queue_id ? '→ ' : '  ';
    console.log(marker + q.id.padEnd(20) + q.status.padEnd(12) +
      `${q.completed_solutions || 0}/${q.total_solutions || 0}`.padEnd(12) +
      q.issue_ids.join(', '));
  }

  const answer = AskUserQuestion({
    questions: [{
      question: "Which queue would you like to execute?",
      header: "Queue",
      multiSelect: false,
      options: activeQueues.map(q => ({
        label: q.id,
        description: `${q.completed_solutions || 0}/${q.total_solutions || 0} completed, Issues: ${q.issue_ids.join(', ')}`
      }))
    }]
  });

  QUEUE_ID = answer['Queue'];
}

console.log(`\n## Executing Queue: ${QUEUE_ID}\n`);
```
|
||||
|
||||
### Phase 1: Get DAG & User Selection

```javascript
// Get dependency graph and parallel batches (QUEUE_ID required)
const dagJson = Bash(`ccw issue queue dag --queue ${QUEUE_ID}`).trim();
const dag = JSON.parse(dagJson);

if (dag.error || dag.ready_count === 0) {
  console.log(dag.error || 'No solutions ready for execution');
  console.log('Use /issue:queue to form a queue first');
  return;
}

console.log(`
## Queue DAG (Solution-Level)

- Total Solutions: ${dag.total}
- Ready: ${dag.ready_count}
- Completed: ${dag.completed_count}
- Parallel in batch 1: ${dag.parallel_batches[0]?.length || 0}
`);

// Interactive selection via AskUserQuestion
const answer = AskUserQuestion({
  questions: [
    {
      question: 'Select executor type:',
      header: 'Executor',
      multiSelect: false,
      options: [
        { label: 'Codex (Recommended)', description: 'Autonomous coding with full write access' },
        { label: 'Gemini', description: 'Large context analysis and implementation' },
        { label: 'Agent', description: 'Claude Code sub-agent for complex tasks' }
      ]
    },
    {
      question: 'Execution mode:',
      header: 'Mode',
      multiSelect: false,
      options: [
        { label: 'Execute (Recommended)', description: 'Run all ready solutions' },
        { label: 'Dry-run', description: 'Show DAG and batches without executing' }
      ]
    },
    {
      question: 'Use git worktree for queue isolation?',
      header: 'Worktree',
      multiSelect: false,
      options: [
        { label: 'Yes (Recommended)', description: 'Create ONE worktree for entire queue - main stays clean' },
        { label: 'No', description: 'Work directly in current directory' }
      ]
    }
  ]
});

const executor = answer['Executor'].toLowerCase().split(' ')[0]; // codex|gemini|agent
const isDryRun = answer['Mode'].includes('Dry-run');
const useWorktree = answer['Worktree'].includes('Yes');

// Dry run mode
if (isDryRun) {
  console.log('### Parallel Batches (Dry-run):\n');
  dag.parallel_batches.forEach((batch, i) => {
    console.log(`Batch ${i + 1}: ${batch.join(', ')}`);
  });
  return;
}
```

### Phase 0.5 & 2: Setup Queue Worktree & Dispatch

```javascript
// Parallelism determined by DAG - no manual limit
// All solutions in same batch have NO file conflicts and can run in parallel
const batch = dag.parallel_batches[0] || [];

// Initialize TodoWrite
TodoWrite({
  todos: batch.map(id => ({
    content: `Execute solution ${id}`,
    status: 'pending',
    activeForm: `Executing solution ${id}`
  }))
});

console.log(`\n### Executing Solutions (DAG batch 1): ${batch.join(', ')}`);

// Parse existing worktree path from args if provided
// Example: --worktree /path/to/existing/worktree
const existingWorktree = args.worktree && typeof args.worktree === 'string' ? args.worktree : null;

// Setup ONE worktree for entire queue (not per-solution)
let worktreePath = null;
let worktreeBranch = null;

if (useWorktree) {
  const repoRoot = Bash('git rev-parse --show-toplevel').trim();
  const worktreeBase = `${repoRoot}/.ccw/worktrees`;
  Bash(`mkdir -p "${worktreeBase}"`);
  Bash('git worktree prune'); // Cleanup stale worktrees

  if (existingWorktree) {
    // Resume mode: Use existing worktree
    worktreePath = existingWorktree;
    worktreeBranch = Bash(`git -C "${worktreePath}" branch --show-current`).trim();
    console.log(`Resuming in existing worktree: ${worktreePath} (branch: ${worktreeBranch})`);
  } else {
    // Create mode: ONE worktree for the entire queue
    const timestamp = new Date().toISOString().replace(/[-:T]/g, '').slice(0, 14);
    worktreeBranch = `queue-exec-${dag.queue_id || timestamp}`;
    worktreePath = `${worktreeBase}/${worktreeBranch}`;
    Bash(`git worktree add "${worktreePath}" -b "${worktreeBranch}"`);
    console.log(`Created queue worktree: ${worktreePath}`);
  }
}

// Launch ALL solutions in batch in parallel (DAG guarantees no conflicts)
// All executors work in the SAME worktree (or main if no worktree)
const executions = batch.map(solutionId => {
  updateTodo(solutionId, 'in_progress');
  return dispatchExecutor(solutionId, executor, worktreePath);
});

await Promise.all(executions);
batch.forEach(id => updateTodo(id, 'completed'));
```

### Executor Dispatch

```javascript
// worktreePath: path to shared worktree (null if not using worktree)
function dispatchExecutor(solutionId, executorType, worktreePath = null) {
  // If worktree is provided, executor works in that directory
  // No per-solution worktree creation - ONE worktree for entire queue

  // Pre-defined values (replaced at dispatch time, NOT by executor)
  const SOLUTION_ID = solutionId;
  const WORK_DIR = worktreePath || null;

  // Build prompt without markdown code blocks to avoid escaping issues
  const prompt = `
## Execute Solution: ${SOLUTION_ID}
${WORK_DIR ? `Working Directory: ${WORK_DIR}` : ''}

### Step 1: Get Solution Details
Run this command to get the full solution with all tasks:
ccw issue detail ${SOLUTION_ID}

### Step 2: Execute All Tasks Sequentially
The detail command returns a FULL SOLUTION with all tasks.
Execute each task in order (T1 → T2 → T3 → ...):

For each task:
- Follow task.implementation steps
- Run task.test commands
- Verify task.acceptance criteria
- Do NOT commit after each task

### Step 3: Commit Solution (Once)
After ALL tasks pass, commit once with formatted summary.

Command:
git add -A
git commit -m "<type>(<scope>): <description>

Solution: ${SOLUTION_ID}
Tasks completed: <list task IDs>

Changes:
- <file1>: <what changed>
- <file2>: <what changed>

Verified: all tests passed"

Replace <type> with: feat|fix|refactor|docs|test
Replace <scope> with: affected module name
Replace <description> with: brief summary from solution

### Step 4: Report Completion
On success, run:
ccw issue done ${SOLUTION_ID} --result '{"summary": "<brief>", "files_modified": ["<file1>", "<file2>"], "commit": {"hash": "<hash>", "type": "<type>"}, "tasks_completed": <N>}'

On failure, run:
ccw issue done ${SOLUTION_ID} --fail --reason '{"task_id": "<TX>", "error_type": "<test_failure|build_error|other>", "message": "<error details>"}'

### Important Notes
- Do NOT cleanup worktree - it is shared by all solutions in the queue
- Replace all <placeholder> values with actual values from your execution
`;

  // For CLI tools, pass --cd to set working directory
  const cdOption = worktreePath ? ` --cd "${worktreePath}"` : '';

  if (executorType === 'codex') {
    return Bash(
      `ccw cli -p "${escapePrompt(prompt)}" --tool codex --mode write --id exec-${solutionId}${cdOption}`,
      { timeout: 7200000, run_in_background: true } // 2hr for full solution
    );
  } else if (executorType === 'gemini') {
    return Bash(
      `ccw cli -p "${escapePrompt(prompt)}" --tool gemini --mode write --id exec-${solutionId}${cdOption}`,
      { timeout: 3600000, run_in_background: true }
    );
  } else {
    return Task({
      subagent_type: 'code-developer',
      run_in_background: false,
      description: `Execute solution ${solutionId}`,
      prompt: worktreePath ? `Working directory: ${worktreePath}\n\n${prompt}` : prompt
    });
  }
}
```
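
The dispatch code above calls two helpers — `escapePrompt` and `updateTodo` — that are not defined in this command. A minimal sketch of the assumed behavior (illustrative only; the real implementations may differ):

```javascript
// Assumed helper: escape a multi-line prompt so it survives double-quoted
// shell embedding in `ccw cli -p "..."`. Escapes backslashes, double quotes,
// backticks, and dollar signs.
function escapePrompt(prompt) {
  return prompt.replace(/[\\"`$]/g, ch => '\\' + ch);
}

// Assumed helper: flip one TodoWrite entry's status by solution ID.
// TodoWrite replaces the whole list, so re-emit all todos with one changed.
let todos = []; // kept in sync with the TodoWrite call in Phase 2
function updateTodo(solutionId, status) {
  todos = todos.map(t =>
    t.content === `Execute solution ${solutionId}` ? { ...t, status } : t
  );
  TodoWrite({ todos });
}
```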

### Phase 3: Check Next Batch

```javascript
// Refresh DAG after batch completes (use same QUEUE_ID)
const refreshedDag = JSON.parse(Bash(`ccw issue queue dag --queue ${QUEUE_ID}`).trim());

console.log(`
## Batch Complete

- Solutions Completed: ${refreshedDag.completed_count}/${refreshedDag.total}
- Next ready: ${refreshedDag.ready_count}
`);

if (refreshedDag.ready_count > 0) {
  console.log(`Run \`/issue:execute --queue ${QUEUE_ID}\` again for next batch.`);
  // Note: If resuming, pass existing worktree path:
  // /issue:execute --queue ${QUEUE_ID} --worktree <worktreePath>
}
```

### Phase 4: Worktree Completion (after ALL batches)

```javascript
// Only run when ALL solutions completed AND using worktree
if (useWorktree && refreshedDag.ready_count === 0 && refreshedDag.completed_count === refreshedDag.total) {
  console.log('\n## All Solutions Completed - Worktree Cleanup');

  const answer = AskUserQuestion({
    questions: [{
      question: `Queue complete. What to do with worktree branch "${worktreeBranch}"?`,
      header: 'Merge',
      multiSelect: false,
      options: [
        { label: 'Create PR (Recommended)', description: 'Push branch and create pull request' },
        { label: 'Merge to main', description: 'Merge all commits and cleanup worktree' },
        { label: 'Keep branch', description: 'Cleanup worktree, keep branch for manual handling' }
      ]
    }]
  });

  const repoRoot = Bash('git rev-parse --show-toplevel').trim();

  if (answer['Merge'].includes('Create PR')) {
    Bash(`git -C "${worktreePath}" push -u origin "${worktreeBranch}"`);
    Bash(`gh pr create --title "Queue ${dag.queue_id}" --body "Issue queue execution - all solutions completed" --head "${worktreeBranch}"`);
    Bash(`git worktree remove "${worktreePath}"`);
    console.log(`PR created for branch: ${worktreeBranch}`);
  } else if (answer['Merge'].includes('Merge to main')) {
    // Check main is clean
    const mainDirty = Bash('git status --porcelain').trim();
    if (mainDirty) {
      console.log('Warning: Main has uncommitted changes. Falling back to PR.');
      Bash(`git -C "${worktreePath}" push -u origin "${worktreeBranch}"`);
      Bash(`gh pr create --title "Queue ${dag.queue_id}" --body "Issue queue execution (main had uncommitted changes)" --head "${worktreeBranch}"`);
    } else {
      Bash(`git merge --no-ff "${worktreeBranch}" -m "Merge queue ${dag.queue_id}"`);
      Bash(`git branch -d "${worktreeBranch}"`);
    }
    Bash(`git worktree remove "${worktreePath}"`);
  } else {
    Bash(`git worktree remove "${worktreePath}"`);
    console.log(`Branch ${worktreeBranch} kept for manual handling`);
  }
}
```

## Parallel Execution Model

```
┌─────────────────────────────────────────────────────────────────┐
│ Orchestrator                                                    │
├─────────────────────────────────────────────────────────────────┤
│ 0. Validate QUEUE_ID (required, or prompt user to select)       │
│                                                                 │
│ 0.5 (if --worktree) Create ONE worktree for entire queue        │
│     → .ccw/worktrees/queue-exec-<queue-id>                      │
│                                                                 │
│ 1. ccw issue queue dag --queue ${QUEUE_ID}                      │
│    → { parallel_batches: [["S-1","S-2"], ["S-3"]] }             │
│                                                                 │
│ 2. Dispatch batch 1 (parallel, SAME worktree):                  │
│    ┌──────────────────────────────────────────────────────┐    │
│    │ Shared Queue Worktree (or main)                      │    │
│    │   ┌──────────────────┐  ┌──────────────────┐         │    │
│    │   │ Executor 1       │  │ Executor 2       │         │    │
│    │   │ detail S-1       │  │ detail S-2       │         │    │
│    │   │ [T1→T2→T3]       │  │ [T1→T2]          │         │    │
│    │   │ commit S-1       │  │ commit S-2       │         │    │
│    │   │ done S-1         │  │ done S-2         │         │    │
│    │   └──────────────────┘  └──────────────────┘         │    │
│    └──────────────────────────────────────────────────────┘    │
│                                                                 │
│ 3. ccw issue queue dag (refresh)                                │
│    → S-3 now ready → dispatch batch 2 (same worktree)           │
│                                                                 │
│ 4. (if --worktree) ALL batches complete → cleanup worktree      │
│    → Prompt: Create PR / Merge to main / Keep branch            │
└─────────────────────────────────────────────────────────────────┘
```

**Why this works for parallel:**
- **ONE worktree for entire queue** → all solutions share the same isolated workspace
- `detail <id>` is READ-ONLY → no race conditions
- Each executor handles **all tasks within a solution** sequentially
- **One commit per solution** with formatted summary (not per-task)
- `done <id>` updates only its own solution status
- `queue dag` recalculates ready solutions after each batch
- Solutions in the same batch have NO file conflicts (DAG guarantees)
- **Main workspace stays clean** until merge/PR decision

## CLI Endpoint Contract

### `ccw issue queue list --brief --json`
Returns the queue index for selection (used when `--queue` is not provided):
```json
{
  "active_queue_id": "QUE-20251215-001",
  "queues": [
    { "id": "QUE-20251215-001", "status": "active", "issue_ids": ["ISS-001"], "total_solutions": 5, "completed_solutions": 2 }
  ]
}
```

### `ccw issue queue dag --queue <queue-id>`
Returns the dependency graph with parallel batches (solution-level, **--queue required**):
```json
{
  "queue_id": "QUE-...",
  "total": 3,
  "ready_count": 2,
  "completed_count": 0,
  "nodes": [
    { "id": "S-1", "issue_id": "ISS-xxx", "status": "pending", "ready": true, "task_count": 3 },
    { "id": "S-2", "issue_id": "ISS-yyy", "status": "pending", "ready": true, "task_count": 2 },
    { "id": "S-3", "issue_id": "ISS-zzz", "status": "pending", "ready": false, "depends_on": ["S-1"] }
  ],
  "parallel_batches": [["S-1", "S-2"], ["S-3"]]
}
```

### `ccw issue detail <item_id>`
Returns the FULL SOLUTION with all tasks (READ-ONLY):
```json
{
  "item_id": "S-1",
  "issue_id": "ISS-xxx",
  "solution_id": "SOL-xxx",
  "status": "pending",
  "solution": {
    "id": "SOL-xxx",
    "approach": "...",
    "tasks": [
      { "id": "T1", "title": "...", "implementation": [...], "test": {...} },
      { "id": "T2", "title": "...", "implementation": [...], "test": {...} },
      { "id": "T3", "title": "...", "implementation": [...], "test": {...} }
    ],
    "exploration_context": { "relevant_files": [...] }
  },
  "execution_hints": { "executor": "codex", "estimated_minutes": 180 }
}
```

### `ccw issue done <item_id>`
Marks the solution completed/failed, updates queue state, and checks for queue completion.

## Error Handling

| Error | Resolution |
|-------|------------|
| No queue | Run /issue:queue first |
| No ready solutions | Dependencies blocked, check DAG |
| Executor timeout | Solution not marked done, can retry |
| Solution failure | Use `ccw issue retry` to reset |
| Partial task failure | Executor reports which task failed via `done --fail` |

## Related Commands

- `/issue:plan` - Plan issues with solutions
- `/issue:queue` - Form execution queue
- `ccw issue queue dag` - View dependency graph
- `ccw issue detail <id>` - View task details
- `ccw issue retry` - Reset failed tasks
.claude/commands/issue/from-brainstorm.md (new file, 382 lines)
@@ -0,0 +1,382 @@
---
name: from-brainstorm
description: Convert brainstorm session ideas into issue with executable solution for parallel-dev-cycle
argument-hint: "SESSION=\"<session-id>\" [--idea=<index>] [--auto] [-y|--yes]"
allowed-tools: TodoWrite(*), Bash(*), Read(*), Write(*), Glob(*), AskUserQuestion(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-select the highest-scored idea, skip confirmations, create the issue directly.

# Issue From-Brainstorm Command (/issue:from-brainstorm)

## Overview

Bridge command that converts **brainstorm-with-file** session output into an executable **issue + solution** for parallel-dev-cycle consumption.

**Core workflow**: Load Session → Select Idea → Convert to Issue → Generate Solution → Bind & Ready

**Input sources**:
- **synthesis.json** - Main brainstorm results with top_ideas
- **perspectives.json** - Multi-CLI perspectives (creative/pragmatic/systematic)
- **.brainstorming/** - Synthesis artifacts (clarifications, enhancements from role analyses)

**Output**:
- **Issue** (ISS-YYYYMMDD-NNN) - Full context with clarifications
- **Solution** (SOL-{issue-id}-{uid}) - Structured tasks for parallel-dev-cycle

## Quick Reference

```bash
# Interactive mode - select idea, confirm before creation
/issue:from-brainstorm SESSION="BS-rate-limiting-2025-01-28"

# Pre-select idea by index
/issue:from-brainstorm SESSION="BS-auth-system-2025-01-28" --idea=0

# Auto mode - select highest scored, no confirmations
/issue:from-brainstorm SESSION="BS-caching-2025-01-28" --auto -y
```

## Arguments

| Argument | Required | Type | Default | Description |
|----------|----------|------|---------|-------------|
| SESSION | Yes | String | - | Session ID or path to `.workflow/.brainstorm/BS-xxx` |
| --idea | No | Integer | - | Pre-select idea by index (0-based) |
| --auto | No | Flag | false | Auto-select highest-scored idea |
| -y, --yes | No | Flag | false | Skip all confirmations |

## Data Structures

### Issue Schema (Output)

```typescript
interface Issue {
  id: string;                      // ISS-YYYYMMDD-NNN
  title: string;                   // From idea.title
  status: 'planned';               // Auto-set after solution binding
  priority: number;                // 1-5 (derived from idea.score)
  context: string;                 // Full description with clarifications
  source: 'brainstorm';
  labels: string[];                // ['brainstorm', perspective, feasibility]

  // Structured fields
  expected_behavior: string;       // From key_strengths
  actual_behavior: string;         // From main_challenges
  affected_components: string[];   // Extracted from description

  _brainstorm_metadata: {
    session_id: string;
    idea_score: number;
    novelty: number;
    feasibility: string;
    clarifications_count: number;
  };
}
```

### Solution Schema (Output)

```typescript
interface Solution {
  id: string;                      // SOL-{issue-id}-{4-char-uid}
  description: string;             // idea.title
  approach: string;                // idea.description
  tasks: Task[];                   // Generated from idea.next_steps

  analysis: {
    risk: 'low' | 'medium' | 'high';
    impact: 'low' | 'medium' | 'high';
    complexity: 'low' | 'medium' | 'high';
  };

  is_bound: boolean;               // true
  created_at: string;
  bound_at: string;
}

interface Task {
  id: string;                      // T1, T2, T3...
  title: string;                   // Actionable task name
  scope: string;                   // design|implementation|testing|documentation
  action: string;                  // Implement|Design|Research|Test|Document
  description: string;

  implementation: string[];        // Step-by-step guide
  acceptance: {
    criteria: string[];            // What defines success
    verification: string[];        // How to verify
  };

  priority: number;                // 1-5
  depends_on: string[];            // Task dependencies
}
```

## Execution Flow

```
Phase 1: Session Loading
├─ Validate session path
├─ Load synthesis.json (required)
├─ Load perspectives.json (optional - multi-CLI insights)
├─ Load .brainstorming/** (optional - synthesis artifacts)
└─ Validate top_ideas array exists

Phase 2: Idea Selection
├─ Auto mode: Select highest scored idea
├─ Pre-selected: Use --idea=N index
└─ Interactive: Display table, ask user to select

Phase 3: Enrich Issue Context
├─ Base: idea.description + key_strengths + main_challenges
├─ Add: Relevant clarifications (Requirements/Architecture/Feasibility)
├─ Add: Multi-perspective insights (creative/pragmatic/systematic)
└─ Add: Session metadata (session_id, completion date, clarification count)

Phase 4: Create Issue
├─ Generate issue data with enriched context
├─ Calculate priority from idea.score (0-10 → 1-5)
├─ Create via: ccw issue create (heredoc for JSON)
└─ Returns: ISS-YYYYMMDD-NNN

Phase 5: Generate Solution Tasks
├─ T1: Research & Validate (if main_challenges exist)
├─ T2: Design & Specification (if key_strengths exist)
├─ T3+: Implementation tasks (from idea.next_steps)
└─ Each task includes: implementation steps + acceptance criteria

Phase 6: Bind Solution
├─ Write solution to .workflow/issues/solutions/{issue-id}.jsonl
├─ Bind via: ccw issue bind {issue-id} {solution-id}
├─ Update issue status to 'planned'
└─ Returns: SOL-{issue-id}-{uid}

Phase 7: Next Steps
└─ Offer: Form queue | Convert another idea | View details | Done
```

## Context Enrichment Logic

### Base Context (Always Included)

- **Description**: `idea.description`
- **Why This Idea**: `idea.key_strengths[]`
- **Challenges to Address**: `idea.main_challenges[]`
- **Implementation Steps**: `idea.next_steps[]`

### Enhanced Context (If Available)

**From Synthesis Artifacts** (`.brainstorming/*/analysis*.md`):
- Extract clarifications matching categories: Requirements, Architecture, Feasibility
- Format: `**{Category}** ({role}): {question} → {answer}`
- Limit: Top 3 most relevant

**From Perspectives** (`perspectives.json`):
- **Creative**: First insight from `perspectives.creative.insights[0]`
- **Pragmatic**: First blocker from `perspectives.pragmatic.blockers[0]`
- **Systematic**: First pattern from `perspectives.systematic.patterns[0]`

**Session Metadata**:
- Session ID, Topic, Completion Date
- Clarifications count (if synthesis artifacts loaded)
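
Put together, the enrichment could look like the following sketch (the helper name `buildContext` and the exact section headings are illustrative assumptions, not the command's fixed output format):

```javascript
// Sketch: assemble the enriched issue context from the sources listed above.
// `idea` comes from synthesis.json; `perspectives` and `clarifications` are optional.
function buildContext(idea, perspectives, clarifications, session) {
  const lines = [
    idea.description,
    '',
    '**Why This Idea**:',
    ...(idea.key_strengths || []).map(s => `- ${s}`),
    '',
    '**Challenges to Address**:',
    ...(idea.main_challenges || []).map(c => `- ${c}`)
  ];

  // Enhanced context: top 3 clarifications, if synthesis artifacts were loaded
  for (const c of (clarifications || []).slice(0, 3)) {
    lines.push(`**${c.category}** (${c.role}): ${c.question} → ${c.answer}`);
  }

  // First insight / blocker / pattern from each perspective, if present
  if (perspectives?.creative?.insights?.[0]) lines.push(`Creative: ${perspectives.creative.insights[0]}`);
  if (perspectives?.pragmatic?.blockers?.[0]) lines.push(`Pragmatic blocker: ${perspectives.pragmatic.blockers[0]}`);
  if (perspectives?.systematic?.patterns?.[0]) lines.push(`Systematic pattern: ${perspectives.systematic.patterns[0]}`);

  lines.push('', `Session: ${session.session_id} (completed ${session.completed_at})`);
  return lines.join('\n');
}
```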

## Task Generation Strategy

### Task 1: Research & Validation
**Trigger**: `idea.main_challenges.length > 0`
- **Title**: "Research & Validate Approach"
- **Scope**: design
- **Action**: Research
- **Implementation**: Investigate blockers, review similar implementations, validate with team
- **Acceptance**: Blockers documented, feasibility assessed, approach validated

### Task 2: Design & Specification
**Trigger**: `idea.key_strengths.length > 0`
- **Title**: "Design & Create Specification"
- **Scope**: design
- **Action**: Design
- **Implementation**: Create design doc, define success criteria, plan phases
- **Acceptance**: Design complete, metrics defined, plan outlined

### Task 3+: Implementation Tasks
**Trigger**: `idea.next_steps[]`
- **Title**: From `next_steps[i]` (max 60 chars)
- **Scope**: Inferred from keywords (test→testing, api→backend, ui→frontend)
- **Action**: Detected from verbs (implement, create, update, fix, test, document)
- **Implementation**: Execute step + follow design + write tests
- **Acceptance**: Step implemented + tests passing + code reviewed

### Fallback Task
**Trigger**: No tasks generated from above
- **Title**: `idea.title`
- **Scope**: implementation
- **Action**: Implement
- **Generic implementation + acceptance criteria**
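
As a sketch, the Task 3+ rules above could be implemented like this (the keyword and verb lists are illustrative assumptions; only the behavior described above is from the spec):

```javascript
// Sketch: turn idea.next_steps into implementation tasks (T3, T4, ...).
function generateImplementationTasks(idea, startIndex = 3) {
  const inferScope = step =>
    /test/i.test(step) ? 'testing' :
    /\bapi\b/i.test(step) ? 'backend' :
    /\bui\b/i.test(step) ? 'frontend' : 'implementation';

  const inferAction = step => {
    const verb = step.match(/^(implement|create|update|fix|test|document)/i)?.[1] || 'Implement';
    return verb[0].toUpperCase() + verb.slice(1).toLowerCase();
  };

  return (idea.next_steps || []).map((step, i) => ({
    id: `T${startIndex + i}`,
    title: step.substring(0, 60), // max 60 chars
    scope: inferScope(step),
    action: inferAction(step),
    description: step,
    implementation: [step, 'Follow the design doc', 'Write tests for the change'],
    acceptance: {
      criteria: ['Step implemented', 'Tests passing'],
      verification: ['Run test suite', 'Code review']
    },
    priority: 3,
    depends_on: i === 0 ? [] : [`T${startIndex + i - 1}`] // sequential chain (assumption)
  }));
}
```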

## Priority Calculation

### Issue Priority (1-5)
```
idea.score: 0-10
priority = max(1, min(5, floor((10 - score) / 2) + 1))

Examples:
score 9-10 → priority 1 (critical)
score 7-8  → priority 2 (high)
score 5-6  → priority 3 (medium)
score 3-4  → priority 4 (low)
score 0-2  → priority 5 (lowest)
```
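
The same mapping as runnable JavaScript (`issuePriority` is an illustrative name):

```javascript
// Score (0-10) → issue priority (1 critical … 5 lowest), per the table above.
function issuePriority(score) {
  return Math.max(1, Math.min(5, Math.floor((10 - score) / 2) + 1));
}

issuePriority(9); // → 1 (critical)
issuePriority(7); // → 2 (high)
issuePriority(5); // → 3 (medium)
issuePriority(3); // → 4 (low)
issuePriority(1); // → 5 (lowest)
```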

### Task Priority (1-5)
- Research task: 1 (highest)
- Design task: 2
- Implementation tasks: 3 by default, decrement for later tasks
- Testing/documentation: 4-5

### Complexity Analysis
```
risk:       main_challenges.length > 2 ? 'high' : 'medium'
impact:     score >= 8 ? 'high' : score >= 6 ? 'medium' : 'low'
complexity: main_challenges > 3 OR tasks > 5 ? 'high'
            tasks > 3 ? 'medium' : 'low'
```
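
In JavaScript, assuming `idea` and the generated `tasks` array from the strategy above (the `'low'` risk case is not specified by the rules, so this sketch defaults anything with ≤2 challenges to `'medium'`):

```javascript
// Sketch of the risk/impact/complexity rules above.
function analyzeSolution(idea, tasks) {
  const challenges = (idea.main_challenges || []).length;
  return {
    risk: challenges > 2 ? 'high' : 'medium',
    impact: idea.score >= 8 ? 'high' : idea.score >= 6 ? 'medium' : 'low',
    complexity: (challenges > 3 || tasks.length > 5) ? 'high'
              : tasks.length > 3 ? 'medium'
              : 'low'
  };
}
```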

## CLI Integration

### Issue Creation
```bash
# Uses heredoc to avoid shell escaping
ccw issue create << 'EOF'
{
  "title": "...",
  "context": "...",
  "priority": 3,
  "source": "brainstorm",
  "labels": ["brainstorm", "creative", "feasibility-high"],
  ...
}
EOF
```

### Solution Binding
```bash
# Append solution to JSONL file
echo '{"id":"SOL-xxx","tasks":[...]}' >> .workflow/issues/solutions/{issue-id}.jsonl

# Bind to issue
ccw issue bind {issue-id} {solution-id}

# Update status
ccw issue update {issue-id} --status planned
```

## Error Handling

| Error | Message | Resolution |
|-------|---------|------------|
| Session not found | synthesis.json missing | Check session ID, list available sessions |
| No ideas | top_ideas array empty | Complete brainstorm workflow first |
| Invalid idea index | Index out of range | Check valid range 0 to N-1 |
| Issue creation failed | ccw issue create error | Verify CLI endpoint working |
| Solution binding failed | Bind error | Check issue exists, retry |

## Examples

### Interactive Mode

```bash
/issue:from-brainstorm SESSION="BS-rate-limiting-2025-01-28"

# Output:
# | # | Title                  | Score | Feasibility |
# |---|------------------------|-------|-------------|
# | 0 | Token Bucket Algorithm | 8.5   | High        |
# | 1 | Sliding Window Counter | 7.2   | Medium      |
# | 2 | Fixed Window           | 6.1   | High        |

# User selects: #0

# Result:
# ✓ Created issue: ISS-20250128-001
# ✓ Created solution: SOL-ISS-20250128-001-ab3d
# ✓ Bound solution to issue
# → Next: /issue:queue
```

### Auto Mode

```bash
/issue:from-brainstorm SESSION="BS-caching-2025-01-28" --auto

# Result:
# Auto-selected: Redis Cache Layer (Score: 9.2/10)
# ✓ Created issue: ISS-20250128-002
# ✓ Solution with 4 tasks
# → Status: planned
```

## Integration Flow

```
brainstorm-with-file
  │
  ├─ synthesis.json
  ├─ perspectives.json
  └─ .brainstorming/** (optional)
  │
  ▼
/issue:from-brainstorm   ◄─── This command
  │
  ├─ ISS-YYYYMMDD-NNN (enriched issue)
  └─ SOL-{issue-id}-{uid} (structured solution)
  │
  ▼
/issue:queue
  │
  ▼
/parallel-dev-cycle
  │
  ▼
RA → EP → CD → VAS
```

## Session Files Reference

### Input Files

```
.workflow/.brainstorm/BS-{slug}-{date}/
├── synthesis.json       # REQUIRED - Top ideas with scores
├── perspectives.json    # OPTIONAL - Multi-CLI insights
├── brainstorm.md        # Reference only
└── .brainstorming/      # OPTIONAL - Synthesis artifacts
    ├── system-architect/
    │   └── analysis.md  # Contains clarifications + enhancements
    ├── api-designer/
    │   └── analysis.md
    └── ...
```

### Output Files

```
.workflow/issues/
├── solutions/
│   └── ISS-YYYYMMDD-001.jsonl  # Created solution (JSONL)
└── (managed by ccw issue CLI)
```

## Related Commands

- `/workflow:brainstorm-with-file` - Generate brainstorm sessions
- `/workflow:brainstorm:synthesis` - Add clarifications to brainstorm
- `/issue:new` - Create issues from GitHub or text
- `/issue:plan` - Generate solutions via exploration
- `/issue:queue` - Form execution queue
- `/issue:execute` - Execute with parallel-dev-cycle
- `ccw issue status <id>` - View issue
- `ccw issue solution <id>` - View solution
.claude/commands/issue/new.md (new file, 416 lines)
@@ -0,0 +1,416 @@
---
name: new
description: Create structured issue from GitHub URL or text description
argument-hint: "[-y|--yes] <github-url | text-description> [--priority 1-5]"
allowed-tools: TodoWrite(*), Bash(*), Read(*), AskUserQuestion(*), mcp__ace-tool__search_context(*)
---

## Auto Mode

When `--yes` or `-y`: Skip clarification questions, create the issue with inferred details.

# Issue New Command (/issue:new)

## Core Principle

**Requirement Clarity Detection** → Ask only when needed

```
Clear Input (GitHub URL, structured text) → Direct creation
Unclear Input (vague description)         → Minimal clarifying questions
```

## Issue Structure

```typescript
interface Issue {
  id: string;                        // GH-123 or ISS-YYYYMMDD-HHMMSS
  title: string;
  status: 'registered' | 'planned' | 'queued' | 'in_progress' | 'completed' | 'failed';
  priority: number;                  // 1 (critical) to 5 (low)
  context: string;                   // Problem description (single source of truth)
  source: 'github' | 'text' | 'discovery';
  source_url?: string;
  labels?: string[];

  // GitHub binding (for non-GitHub sources that publish to GitHub)
  github_url?: string;               // https://github.com/owner/repo/issues/123
  github_number?: number;            // 123

  // Optional structured fields
  expected_behavior?: string;
  actual_behavior?: string;
  affected_components?: string[];

  // Feedback history (failures + human clarifications)
  feedback?: {
    type: 'failure' | 'clarification' | 'rejection';
    stage: string;                   // new/plan/execute
    content: string;
    created_at: string;
  }[];

  // Solution binding
  bound_solution_id: string | null;

  // Timestamps
  created_at: string;
  updated_at: string;
}
```

## Quick Reference

```bash
# Clear inputs - direct creation
/issue:new https://github.com/owner/repo/issues/123
/issue:new "Login fails with special chars. Expected: success. Actual: 500 error"

# Vague input - will ask clarifying questions
/issue:new "something wrong with auth"
```

## Implementation

### Phase 1: Input Analysis & Clarity Detection

```javascript
const input = userInput.trim();
const flags = parseFlags(userInput); // --priority

// Detect input type and clarity
const isGitHubUrl = input.match(/github\.com\/[\w-]+\/[\w-]+\/issues\/\d+/);
const isGitHubShort = input.match(/^#(\d+)$/);
const hasStructure = input.match(/(expected|actual|affects|steps):/i);

// Clarity score: 0-3
let clarityScore = 0;
if (isGitHubUrl || isGitHubShort) clarityScore = 3; // GitHub = fully clear
else if (hasStructure) clarityScore = 2;            // Structured text = clear
else if (input.length > 50) clarityScore = 1;       // Long text = somewhat clear
else clarityScore = 0;                              // Vague

let issueData = {};
```

### Phase 2: Data Extraction (GitHub or Text)

```javascript
if (isGitHubUrl || isGitHubShort) {
  // GitHub - fetch via gh CLI
  const result = Bash(`gh issue view ${extractIssueRef(input)} --json number,title,body,labels,url`);
  const gh = JSON.parse(result);
  issueData = {
    id: `GH-${gh.number}`,
    title: gh.title,
    source: 'github',
    source_url: gh.url,
    labels: gh.labels.map(l => l.name),
    context: gh.body?.substring(0, 500) || gh.title,
    ...parseMarkdownBody(gh.body)
  };
} else {
  // Text description
  issueData = {
    id: `ISS-${new Date().toISOString().replace(/[-:T]/g, '').slice(0, 14)}`,
    source: 'text',
    ...parseTextDescription(input)
  };
}
```

### Phase 3: Lightweight Context Hint (Conditional)

```javascript
// ACE search ONLY for medium clarity (1-2) AND missing components
// Skip for: GitHub (has context), vague (needs clarification first)
// Note: Deep exploration happens in /issue:plan, this is just a quick hint

if (clarityScore >= 1 && clarityScore <= 2 && !issueData.affected_components?.length) {
  const keywords = extractKeywords(issueData.context);

  if (keywords.length >= 2) {
    try {
      const aceResult = mcp__ace-tool__search_context({
        project_root_path: process.cwd(),
        query: keywords.slice(0, 3).join(' ')
      });
      issueData.affected_components = aceResult.files?.slice(0, 3) || [];
    } catch {
      // ACE failure is non-blocking
    }
  }
}
```

### Phase 4: Conditional Clarification (Only if Unclear)

```javascript
// ONLY ask questions if clarity is low - simple open-ended prompt
if (clarityScore < 2 && (!issueData.context || issueData.context.length < 20)) {
  const answer = AskUserQuestion({
    questions: [{
      question: 'Please describe the issue in more detail:',
      header: 'Clarify',
      multiSelect: false,
      options: [
        { label: 'Provide details', description: 'Describe what, where, and expected behavior' }
      ]
    }]
  });

  // Use custom text input (via "Other")
  if (answer.customText) {
    issueData.context = answer.customText;
    issueData.title = answer.customText.split(/[.\n]/)[0].substring(0, 60);
    issueData.feedback = [{
      type: 'clarification',
      stage: 'new',
      content: answer.customText,
      created_at: new Date().toISOString()
    }];
  }
}
```

### Phase 5: GitHub Publishing Decision (Non-GitHub Sources)

```javascript
// For non-GitHub sources, ask if user wants to publish to GitHub
let publishToGitHub = false;

if (issueData.source !== 'github') {
  const publishAnswer = AskUserQuestion({
    questions: [{
      question: 'Would you like to publish this issue to GitHub?',
      header: 'Publish',
      multiSelect: false,
      options: [
        { label: 'Yes, publish to GitHub', description: 'Create issue on GitHub and link it' },
        { label: 'No, keep local only', description: 'Store as local issue without GitHub sync' }
      ]
    }]
  });

  publishToGitHub = publishAnswer['Publish']?.includes('Yes');
}
```

### Phase 6: Create Issue

**Summary Display:**
- Show ID, title, source, affected files (if any)

**Confirmation** (only for vague inputs, clarityScore < 2):
- Use `AskUserQuestion` to confirm before creation

**Issue Creation** (via CLI endpoint):
```bash
# Option 1: Pipe input (recommended for complex JSON - avoids shell escaping)
echo '{"title":"...", "context":"...", "priority":3}' | ccw issue create

# Option 2: Heredoc (for multi-line JSON)
ccw issue create << 'EOF'
{"title":"...", "context":"content containing \"quotes\"", "priority":3}
EOF

# Option 3: --data parameter (simple cases only)
ccw issue create --data '{"title":"...", "priority":3}'
```

**CLI Endpoint Features:**
| Feature | Description |
|---------|-------------|
| Auto-increment ID | `ISS-YYYYMMDD-NNN` (e.g., `ISS-20251229-001`) |
| Trailing newline | Proper JSONL format, no corruption |
| JSON output | Returns created issue with all fields |

**Example:**
```bash
# Create issue via pipe (recommended)
echo '{"title": "Login fails with special chars", "context": "500 error when password contains quotes", "priority": 2}' | ccw issue create

# Or with heredoc for complex JSON
ccw issue create << 'EOF'
{
  "title": "Login fails with special chars",
  "context": "500 error when password contains \"quotes\"",
  "priority": 2,
  "source": "text",
  "expected_behavior": "Login succeeds",
  "actual_behavior": "500 Internal Server Error"
}
EOF

# Output (JSON)
{
  "id": "ISS-20251229-001",
  "title": "Login fails with special chars",
  "status": "registered",
  ...
}
```

**GitHub Publishing** (if user opted in):
```javascript
// Step 1: Create local issue FIRST
const localIssue = createLocalIssue(issueData); // ccw issue create

// Step 2: Publish to GitHub if requested
if (publishToGitHub) {
  const ghResult = Bash(`gh issue create --title "${issueData.title}" --body "${issueData.context}"`);
  // Parse GitHub URL from output
  const ghUrl = ghResult.match(/https:\/\/github\.com\/[\w-]+\/[\w-]+\/issues\/\d+/)?.[0];
  const ghNumber = parseInt(ghUrl?.match(/\/issues\/(\d+)/)?.[1]);

  if (ghNumber) {
    // Step 3: Update local issue with GitHub binding
    Bash(`ccw issue update ${localIssue.id} --github-url "${ghUrl}" --github-number ${ghNumber}`);
    // Or via pipe:
    // echo '{"github_url":"${ghUrl}","github_number":${ghNumber}}' | ccw issue update ${localIssue.id}
  }
}
```

**Workflow:**
```
1. Create local issue (ISS-YYYYMMDD-NNN) → stored in .workflow/issues.jsonl
2. If publishToGitHub:
   a. gh issue create → returns GitHub URL
   b. Update local issue with github_url + github_number binding
3. Both local and GitHub issues exist, linked together
```

**Example with GitHub Publishing:**
```bash
# User creates text issue
/issue:new "Login fails with special chars. Expected: success. Actual: 500"

# System asks: "Would you like to publish this issue to GitHub?"
# User selects: "Yes, publish to GitHub"

# Output:
# ✓ Local issue created: ISS-20251229-001
# ✓ Published to GitHub: https://github.com/org/repo/issues/123
# ✓ GitHub binding saved to local issue
# → Next step: /issue:plan ISS-20251229-001

# Resulting issue JSON:
{
  "id": "ISS-20251229-001",
  "title": "Login fails with special chars",
  "source": "text",
  "github_url": "https://github.com/org/repo/issues/123",
  "github_number": 123,
  ...
}
```

**Completion:**
- Display created issue ID
- Show GitHub URL (if published)
- Show next step: `/issue:plan <id>`

## Execution Flow

```
Phase 1: Input Analysis
└─ Detect clarity score (GitHub URL? Structured text? Keywords?)

Phase 2-4: Data Extraction, Context Hint & Clarification (branched by clarity)
┌────────────┬─────────────────┬──────────────┐
│ Score 3    │ Score 1-2       │ Score 0      │
│ GitHub     │ Text + ACE      │ Vague        │
├────────────┼─────────────────┼──────────────┤
│ gh CLI     │ Parse struct    │ AskQuestion  │
│ → parse    │ + quick hint    │ (1 question) │
│            │ (3 files max)   │ → feedback   │
└────────────┴─────────────────┴──────────────┘

Phase 5: GitHub Publishing Decision (non-GitHub only)
├─ Source = github: Skip (already from GitHub)
└─ Source ≠ github: AskUserQuestion
   ├─ Yes → publishToGitHub = true
   └─ No  → publishToGitHub = false

Phase 6: Create Issue
├─ Score ≥ 2: Direct creation
└─ Score < 2: Confirm first → Create
   └─ If publishToGitHub: gh issue create → link URL

Note: Deep exploration & lifecycle deferred to /issue:plan
```

## Helper Functions

```javascript
function extractKeywords(text) {
  const stopWords = new Set(['the', 'a', 'an', 'is', 'are', 'was', 'were', 'not', 'with']);
  return text
    .toLowerCase()
    .split(/\W+/)
    .filter(w => w.length > 3 && !stopWords.has(w))
    .slice(0, 5);
}

function parseTextDescription(text) {
  const result = { title: '', context: '' };
  const sentences = text.split(/\.(?=\s|$)/);

  result.title = sentences[0]?.trim().substring(0, 60) || 'Untitled';
  result.context = text.substring(0, 500);

  // Extract structured fields if present
  const expected = text.match(/expected:?\s*([^.]+)/i);
  const actual = text.match(/actual:?\s*([^.]+)/i);
  const affects = text.match(/affects?:?\s*([^.]+)/i);

  if (expected) result.expected_behavior = expected[1].trim();
  if (actual) result.actual_behavior = actual[1].trim();
  if (affects) {
    result.affected_components = affects[1].split(/[,\s]+/).filter(c => c.includes('/') || c.includes('.'));
  }

  return result;
}

function parseMarkdownBody(body) {
  if (!body) return {};
  const result = {};

  const problem = body.match(/##?\s*(problem|description)[:\s]*([\s\S]*?)(?=##|$)/i);
  const expected = body.match(/##?\s*expected[:\s]*([\s\S]*?)(?=##|$)/i);
  const actual = body.match(/##?\s*actual[:\s]*([\s\S]*?)(?=##|$)/i);

  // problem has two capture groups (keyword + body); expected/actual have one
  if (problem) result.context = problem[2].trim().substring(0, 500);
  if (expected) result.expected_behavior = expected[1].trim();
  if (actual) result.actual_behavior = actual[1].trim();

  return result;
}
```
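
Phase 2 also calls `extractIssueRef`, which is not defined above. A plausible sketch (the `--repo` handling is an assumption for cross-repo URLs; `gh issue view <number>` alone works inside the target repo):

```javascript
// Assumed helper: normalize a GitHub issue URL or "#123" shorthand into
// arguments accepted by `gh issue view`.
function extractIssueRef(input) {
  const url = input.match(/github\.com\/([\w-]+)\/([\w-]+)\/issues\/(\d+)/);
  if (url) return `${url[3]} --repo ${url[1]}/${url[2]}`;
  const short = input.match(/^#(\d+)$/);
  return short ? short[1] : input;
}
```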

## Examples

### Clear Input (No Questions)

```bash
/issue:new https://github.com/org/repo/issues/42
# → Fetches, parses, creates immediately

/issue:new "Login fails with special chars. Expected: success. Actual: 500"
# → Parses structure, creates immediately
```

### Vague Input (1 Question)

```bash
/issue:new "auth broken"
# → Asks: "Input unclear. What is the issue about?"
# → User provides details → saved to feedback[]
# → Creates issue
```

## Related Commands

- `/issue:plan` - Plan solution for issue
.claude/commands/issue/plan.md (new file, 335 lines)
@@ -0,0 +1,335 @@
---
name: plan
description: Batch plan issue resolution using issue-plan-agent (explore + plan closed-loop)
argument-hint: "[-y|--yes] --all-pending <issue-id>[,<issue-id>,...] [--batch-size 3]"
allowed-tools: TodoWrite(*), Task(*), SlashCommand(*), AskUserQuestion(*), Bash(*), Read(*), Write(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-bind solutions without confirmation, use recommended settings.

# Issue Plan Command (/issue:plan)

## Overview

Unified planning command using **issue-plan-agent** that combines exploration and planning into a single closed-loop workflow.

**Behavior:**
- Single solution per issue → auto-bind
- Multiple solutions → return for user selection
- Agent handles file generation

## Core Guidelines

**⚠️ Data Access Principle**: Issue and solution files can grow very large. To avoid context overflow:

| Operation | Correct | Incorrect |
|-----------|---------|-----------|
| List issues (brief) | `ccw issue list --status pending --brief` | `Read('issues.jsonl')` |
| Read issue details | `ccw issue status <id> --json` | `Read('issues.jsonl')` |
| Update status | `ccw issue update <id> --status ...` | Direct file edit |
| Bind solution | `ccw issue bind <id> <sol-id>` | Direct file edit |

**Output Options**:
- `--brief`: JSON with minimal fields (id, title, status, priority, tags)
- `--json`: Full JSON (agent use only)

**Orchestration vs Execution**:
- **Command (orchestrator)**: Use `--brief` for minimal context
- **Agent (executor)**: Fetch full details → `ccw issue status <id> --json`

**ALWAYS** use CLI commands for CRUD operations. **NEVER** read entire `issues.jsonl` or `solutions/*.jsonl` directly.

## Usage

```bash
/issue:plan [<issue-id>[,<issue-id>,...]] [FLAGS]

# Examples
/issue:plan                        # Default: --all-pending
/issue:plan GH-123                 # Single issue
/issue:plan GH-123,GH-124,GH-125   # Batch (up to 3)
/issue:plan --all-pending          # All pending issues (explicit)

# Flags
--batch-size <n>   Max issues per agent batch (default: 3)
```

## Execution Process

```
Phase 1: Issue Loading & Intelligent Grouping
├─ Parse input (single, comma-separated, or --all-pending)
├─ Fetch issue metadata (ID, title, tags)
├─ Validate issues exist (create if needed)
└─ Intelligent grouping via Gemini (semantic similarity, max 3 per batch)

Phase 2: Unified Explore + Plan (issue-plan-agent)
├─ Launch issue-plan-agent per batch
├─ Agent performs:
│  ├─ ACE semantic search for each issue
│  ├─ Codebase exploration (files, patterns, dependencies)
│  ├─ Solution generation with task breakdown
│  └─ Conflict detection across issues
└─ Output: solution JSON per issue

Phase 3: Solution Registration & Binding
├─ Append solutions to solutions/{issue-id}.jsonl
├─ Single solution per issue → auto-bind
├─ Multiple candidates → AskUserQuestion to select
└─ Update issues.jsonl with bound_solution_id

Phase 4: Summary
├─ Display bound solutions
├─ Show task counts per issue
└─ Display next steps (/issue:queue)
```

## Implementation

### Phase 1: Issue Loading (Brief Info Only)

```javascript
const batchSize = flags.batchSize || 3;
let issues = []; // {id, title, tags} - brief info for grouping only

// Default to --all-pending if no input provided
const useAllPending = flags.allPending || !userInput || userInput.trim() === '';

if (useAllPending) {
  // Get pending issues with brief metadata via CLI
  const result = Bash(`ccw issue list --status "pending,registered" --brief`).trim();
  const parsed = result ? JSON.parse(result) : [];
  issues = parsed.map(i => ({ id: i.id, title: i.title || '', tags: i.tags || [] }));

  if (issues.length === 0) {
    console.log('No pending issues found.');
    return;
  }
  console.log(`Found ${issues.length} pending issues`);
} else {
  // Parse comma-separated issue IDs, fetch brief metadata
  const ids = userInput.includes(',')
    ? userInput.split(',').map(s => s.trim())
    : [userInput.trim()];

  for (const id of ids) {
    Bash(`ccw issue init ${id} --title "Issue ${id}" 2>/dev/null || true`);
    const info = Bash(`ccw issue status ${id} --json`).trim();
    const parsed = info ? JSON.parse(info) : {};
    issues.push({ id, title: parsed.title || '', tags: parsed.tags || [] });
  }
}
// Note: Agent fetches full issue content via `ccw issue status <id> --json`

// Intelligent grouping: Analyze issues by title/tags, group semantically similar ones
// Strategy: Same module/component, related bugs, feature clusters
// Constraint: Max ${batchSize} issues per batch
// Produces `batches`: an array of issue groups (see the fallback sketch below)

console.log(`Processing ${issues.length} issues in ${batches.length} batch(es)`);

TodoWrite({
  todos: batches.map((_, i) => ({
    content: `Plan batch ${i+1}`,
    status: 'pending',
    activeForm: `Planning batch ${i+1}`
  }))
});
```
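
The semantic grouping step is delegated to Gemini and elided above; only the resulting `batches` array (an array of issue groups) is consumed later. A size-only fallback, shown here purely as an illustrative assumption for when semantic grouping is unavailable:

```javascript
// Fallback: plain size-based chunking. Produces the shape Phase 2 expects:
// an array of batches, each batch an array of {id, title, tags} objects.
function chunkIntoBatches(items, size) {
  const batches = [];
  for (let i = 0; i < items.length; i += size) {
    batches.push(items.slice(i, i + size));
  }
  return batches;
}

const batches = chunkIntoBatches(issues, batchSize);
```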

### Phase 2: Unified Explore + Plan (issue-plan-agent) - PARALLEL

```javascript
Bash(`mkdir -p .workflow/issues/solutions`);
const pendingSelections = []; // Collect multi-solution issues for user selection
const agentResults = [];      // Collect all agent results for conflict aggregation

// Build prompts for all batches
const agentTasks = batches.map((batch, batchIndex) => {
  const issueList = batch.map(i => `- ${i.id}: ${i.title}${i.tags.length ? ` [${i.tags.join(', ')}]` : ''}`).join('\n');
  const batchIds = batch.map(i => i.id);

  const issuePrompt = `
## Plan Issues

**Issues** (grouped by similarity):
${issueList}

**Project Root**: ${process.cwd()}

### Project Context (MANDATORY)
1. Read: .workflow/project-tech.json (technology stack, architecture)
2. Read: .workflow/project-guidelines.json (constraints and conventions)

### Workflow
1. Fetch issue details: ccw issue status <id> --json
2. **Analyze failure history** (if issue.feedback exists):
   - Extract failure details from issue.feedback (type='failure', stage='execute')
   - Parse error_type, message, task_id, solution_id from content JSON
   - Identify failure patterns: repeated errors, root causes, blockers
   - **Constraint**: Avoid repeating failed approaches
3. Load project context files
4. Explore codebase (ACE semantic search)
5. Plan solution with tasks (schema: solution-schema.json)
   - **If previous solution failed**: Reference failure analysis in solution.approach
   - Add explicit verification steps to prevent same failure mode
6. **If github_url exists**: Add final task to comment on GitHub issue
7. Write solution to: .workflow/issues/solutions/{issue-id}.jsonl
8. **CRITICAL - Binding Decision**:
   - Single solution → **MUST execute**: ccw issue bind <issue-id> <solution-id>
   - Multiple solutions → Return pending_selection only (no bind)

### Failure-Aware Planning Rules
- **Extract failure patterns**: Parse issue.feedback where type='failure' and stage='execute'
- **Identify root causes**: Analyze error_type (test_failure, compilation, timeout, etc.)
- **Design alternative approach**: Create solution that addresses root cause
- **Add prevention steps**: Include explicit verification to catch same error earlier
- **Document lessons**: Reference previous failures in solution.approach

### Rules
- Solution ID format: SOL-{issue-id}-{uid} (uid: 4 random alphanumeric chars, e.g., a7x9)
- Single solution per issue → auto-bind via ccw issue bind
- Multiple solutions → register only, return pending_selection
- Tasks must have quantified acceptance.criteria

### Return Summary
{"bound":[{"issue_id":"...","solution_id":"...","task_count":N}],"pending_selection":[{"issue_id":"...","solutions":[{"id":"...","description":"...","task_count":N}]}]}
`;

  return { batchIndex, batchIds, issuePrompt, batch };
});

// Launch agents in parallel (max 10 concurrent)
const MAX_PARALLEL = 10;
for (let i = 0; i < agentTasks.length; i += MAX_PARALLEL) {
  const chunk = agentTasks.slice(i, i + MAX_PARALLEL);
  const taskIds = [];

  // Launch chunk in parallel
  for (const { batchIndex, batchIds, issuePrompt, batch } of chunk) {
    updateTodo(`Plan batch ${batchIndex + 1}`, 'in_progress');
    const taskId = Task({
      subagent_type: 'issue-plan-agent',
      run_in_background: true,
      description: `Explore & plan ${batch.length} issues: ${batchIds.join(', ')}`,
      prompt: issuePrompt
    });
    taskIds.push({ taskId, batchIndex });
  }

  console.log(`Launched ${taskIds.length} agents (batch ${i/MAX_PARALLEL + 1}/${Math.ceil(agentTasks.length/MAX_PARALLEL)})...`);

  // Collect results from this chunk
  for (const { taskId, batchIndex } of taskIds) {
    const result = TaskOutput({ task_id: taskId, block: true });

    // Extract JSON from potential markdown code blocks (agent may wrap in ```json...```)
    const jsonText = extractJsonFromMarkdown(result);
    let summary;
    try {
      summary = JSON.parse(jsonText);
    } catch (e) {
      console.log(`⚠ Batch ${batchIndex + 1}: Failed to parse agent result, skipping`);
      updateTodo(`Plan batch ${batchIndex + 1}`, 'completed');
      continue;
    }
    agentResults.push(summary); // Store for Phase 3 conflict aggregation

    // Verify binding for bound issues (agent should have executed bind)
    for (const item of summary.bound || []) {
      const status = JSON.parse(Bash(`ccw issue status ${item.issue_id} --json`).trim());
      if (status.bound_solution_id === item.solution_id) {
        console.log(`✓ ${item.issue_id}: ${item.solution_id} (${item.task_count} tasks)`);
      } else {
        // Fallback: agent failed to bind, execute here
        Bash(`ccw issue bind ${item.issue_id} ${item.solution_id}`);
        console.log(`✓ ${item.issue_id}: ${item.solution_id} (${item.task_count} tasks) [recovered]`);
      }
    }
    // Collect pending selections for Phase 3
    for (const pending of summary.pending_selection || []) {
      pendingSelections.push(pending);
    }
    updateTodo(`Plan batch ${batchIndex + 1}`, 'completed');
  }
}
```
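
`extractJsonFromMarkdown` is used above but not defined in this command. A minimal sketch of the assumed behavior:

```javascript
// Assumed helper: strip an optional ```json fence from the agent's reply
// and return the raw JSON text.
function extractJsonFromMarkdown(text) {
  const fenced = text.match(/```(?:json)?\s*([\s\S]*?)```/);
  return (fenced ? fenced[1] : text).trim();
}
```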

### Phase 3: Solution Selection (if pending)

```javascript
// Handle multi-solution issues
for (const pending of pendingSelections) {
  if (pending.solutions.length === 0) continue;

  const options = pending.solutions.slice(0, 4).map(sol => ({
    label: `${sol.id} (${sol.task_count} tasks)`,
    description: sol.description || sol.approach || 'No description'
  }));

  const answer = AskUserQuestion({
    questions: [{
      question: `Issue ${pending.issue_id}: which solution to bind?`,
      header: pending.issue_id,
      options: options,
      multiSelect: false
    }]
  });

  const selected = answer[Object.keys(answer)[0]];
  if (!selected || selected === 'Other') continue;

  const solId = selected.split(' ')[0];
  Bash(`ccw issue bind ${pending.issue_id} ${solId}`);
  console.log(`✓ ${pending.issue_id}: ${solId} bound`);
}
```

### Phase 4: Summary

```javascript
// Count planned issues via CLI
const planned = JSON.parse(Bash(`ccw issue list --status planned --brief`) || '[]');
const plannedCount = planned.length;

console.log(`
## Done: ${issues.length} issues → ${plannedCount} planned

Next: \`/issue:queue\` → \`/issue:execute\`
`);
```

## Error Handling

| Error | Resolution |
|-------|------------|
| Issue not found | Auto-create in issues.jsonl |
| ACE search fails | Agent falls back to ripgrep |
| No solutions generated | Display error, suggest manual planning |
| User cancels selection | Skip issue, continue with others |
| File conflicts | Agent detects and suggests resolution order |

## Bash Compatibility

**Avoid**: `$(cmd)`, `$var`, `for` loops — will be escaped incorrectly

**Use**: Simple commands + `&&` chains, quote comma params `"pending,registered"`
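
For illustration, the same rule applied to orchestrator `Bash()` calls (the safe commands below are taken from this document's own CLI surface):

```javascript
// Avoid: substitution and loops risk being escaped incorrectly
// Bash(`for id in $(ccw issue list --brief); do ccw issue status $id; done`);

// Prefer: simple commands chained with &&, comma-separated params quoted
Bash(`ccw issue list --status "pending,registered" --brief && ccw issue queue list --brief`);
```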

## Quality Checklist

Before completing, verify:

- [ ] All input issues have solutions in `solutions/{issue-id}.jsonl`
- [ ] Single solution issues are auto-bound (`bound_solution_id` set)
- [ ] Multi-solution issues returned in `pending_selection` for user choice
- [ ] Each solution has executable tasks with `modification_points`
- [ ] Task acceptance criteria are quantified (not vague)
- [ ] Conflicts detected and reported (if multiple issues touch same files)
- [ ] Issue status updated to `planned` after binding

## Related Commands

- `/issue:queue` - Form execution queue from bound solutions
- `ccw issue list` - List all issues
- `ccw issue status` - View issue and solution details
445
.claude/commands/issue/queue.md
Normal file
@@ -0,0 +1,445 @@

---
name: queue
description: Form execution queue from bound solutions using issue-queue-agent (solution-level)
argument-hint: "[-y|--yes] [--queues <n>] [--issue <id>]"
allowed-tools: TodoWrite(*), Task(*), Bash(*), Read(*), Write(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-confirm queue formation, use recommended conflict resolutions.

# Issue Queue Command (/issue:queue)

## Overview

Queue formation command using **issue-queue-agent** that analyzes all bound solutions, resolves **inter-solution** conflicts, and creates an ordered execution queue at **solution level**.

**Design Principle**: Queue items are **solutions**, not individual tasks. Each executor receives a complete solution with all its tasks.

## Core Capabilities

- **Agent-driven**: issue-queue-agent handles all ordering logic
- **Solution-level granularity**: Queue items are solutions, not tasks
- **Conflict clarification**: High-severity conflicts prompt user decision
- **Semantic priority**: Calculated per solution (0.0-1.0)
- **Execution groups**: Parallel/Sequential assignment for solutions

## Core Guidelines

**⚠️ Data Access Principle**: Issues and queue files can grow very large. To avoid context overflow:

| Operation | Correct | Incorrect |
|-----------|---------|-----------|
| List issues (brief) | `ccw issue list --status planned --brief` | `Read('issues.jsonl')` |
| **Batch solutions (NEW)** | `ccw issue solutions --status planned --brief` | Loop `ccw issue solution <id>` |
| List queue (brief) | `ccw issue queue --brief` | `Read('queues/*.json')` |
| Read issue details | `ccw issue status <id> --json` | `Read('issues.jsonl')` |
| Get next item | `ccw issue next --json` | `Read('queues/*.json')` |
| Update status | `ccw issue update <id> --status ...` | Direct file edit |
| Sync from queue | `ccw issue update --from-queue` | Direct file edit |
| Read solution (single) | `ccw issue solution <id> --brief` | `Read('solutions/*.jsonl')` |

**Output Options**:
- `--brief`: JSON with minimal fields (id, status, counts)
- `--json`: Full JSON (agent use only)

**Orchestration vs Execution**:
- **Command (orchestrator)**: Use `--brief` for minimal context
- **Agent (executor)**: Fetch full details → `ccw issue status <id> --json`

**ALWAYS** use CLI commands for CRUD operations. **NEVER** read entire `issues.jsonl` or `queues/*.json` directly.

## Usage

```bash
/issue:queue [FLAGS]

# Examples
/issue:queue                   # Form NEW queue from all bound solutions
/issue:queue --queues 3        # Form 3 parallel queues (solutions distributed)
/issue:queue --issue GH-123    # Form queue for specific issue only
/issue:queue --append GH-124   # Append to active queue
/issue:queue --list            # List all queues (history)
/issue:queue --switch QUE-xxx  # Switch active queue
/issue:queue --archive         # Archive completed active queue

# Flags
--queues <n>    Number of parallel queues (default: 1)
--issue <id>    Form queue for specific issue only
--append <id>   Append issue to active queue (don't create new)
--force         Skip active queue check, always create new queue

# CLI subcommands (ccw issue queue ...)
ccw issue queue list                          List all queues with status
ccw issue queue add <issue-id>                Add issue to queue (interactive if active queue exists)
ccw issue queue add <issue-id> -f             Add to new queue without prompt (force)
ccw issue queue merge <src> --queue <target>  Merge source queue into target queue
ccw issue queue switch <queue-id>             Switch active queue
ccw issue queue archive                       Archive current queue
ccw issue queue delete <queue-id>             Delete queue from history
```

## Execution Process

```
Phase 1: Solution Loading & Distribution
├─ Load issues.jsonl, filter by status='planned' + bound_solution_id
├─ Read solutions/{issue-id}.jsonl, find bound solution
├─ Extract files_touched from task modification_points
├─ Build solution objects array
└─ If --queues > 1: Partition solutions into N groups (minimize cross-group file conflicts)

Phase 2-4: Agent-Driven Queue Formation (issue-queue-agent)
├─ Generate N queue IDs (QUE-xxx-1, QUE-xxx-2, ...)
├─ If --queues == 1: Launch single issue-queue-agent
├─ If --queues > 1: Launch N issue-queue-agents IN PARALLEL
├─ Each agent performs:
│  ├─ Conflict analysis (5 types via Gemini CLI)
│  ├─ Build dependency DAG from conflicts
│  ├─ Calculate semantic priority per solution
│  └─ Assign execution groups (parallel/sequential)
└─ Each agent writes: queue JSON + index update (NOT active yet)

Phase 5: Conflict Clarification (if needed)
├─ Collect `clarifications` arrays from all agents
├─ If clarifications exist → AskUserQuestion (batched)
├─ Pass user decisions back to respective agents (resume)
└─ Agents update queues with resolved conflicts

Phase 6: Status Update & Summary
├─ Update issue statuses to 'queued'
└─ Display new queue summary (N queues)

Phase 7: Active Queue Check & Decision (REQUIRED)
├─ Read queue index: ccw issue queue list --brief
├─ Get generated queue ID from agent output
├─ If NO active queue exists:
│  ├─ Set generated queue as active_queue_id
│  ├─ Update index.json
│  └─ Display: "Queue created and activated"
│
└─ If active queue exists with items:
   ├─ Display both queues to user
   ├─ Use AskUserQuestion to prompt:
   │  ├─ "Use new queue (keep existing)" → Set new as active, keep old inactive
   │  ├─ "Merge: add new items to existing" → Merge new → existing, delete new
   │  ├─ "Merge: add existing items to new" → Merge existing → new, archive old
   │  └─ "Cancel" → Delete new queue, keep existing active
   └─ Execute chosen action
```

## Implementation

### Phase 1: Solution Loading & Distribution

**Data Loading:**
- Use `ccw issue solutions --status planned --brief` to get all planned issues with solutions in **one call**
- Returns: Array of `{ issue_id, solution_id, is_bound, task_count, files_touched[], priority }`
- If no bound solutions found → display message, suggest `/issue:plan`

**Build Solution Objects:**
```javascript
// Single CLI call replaces N individual queries
const result = Bash(`ccw issue solutions --status planned --brief`).trim();
const solutions = result ? JSON.parse(result) : [];

if (solutions.length === 0) {
  console.log('No bound solutions found. Run /issue:plan first.');
  return;
}

// solutions already in correct format:
// { issue_id, solution_id, is_bound, task_count, files_touched[], priority }
```

**Multi-Queue Distribution** (if `--queues > 1`):
- Use `files_touched` from brief output for partitioning
- Group solutions with overlapping files into same queue

**Output:** Array of solution objects (or N arrays if multi-queue)
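
A minimal greedy partition sketch, assuming only the `files_touched` arrays from the brief output; the helper name and tie-breaking strategy are illustrative, and it also supplies the `solutionGroups` referenced in the launch code below:

```javascript
// Greedy: keep a solution with the first queue it shares files with; otherwise balance load
function partitionSolutions(solutions, numQueues) {
  const queues = Array.from({ length: numQueues }, () => ({ items: [], files: new Set() }));
  for (const sol of solutions) {
    const overlapping = queues.find(q => sol.files_touched.some(f => q.files.has(f)));
    const target = overlapping || queues.reduce((a, b) => (a.items.length <= b.items.length ? a : b));
    target.items.push(sol);
    sol.files_touched.forEach(f => target.files.add(f));
  }
  return queues.map(q => q.items);
}

const solutionGroups = partitionSolutions(solutions, args.queues || 1);
```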

### Phase 2-4: Agent-Driven Queue Formation

**Generate Queue IDs** (command layer, pass to agent):
```javascript
// Produces e.g. "20251227-143000", matching the QUE-{date}-{time} IDs in the index schema below
const timestamp = new Date().toISOString().replace(/[-:]/g, '').replace('T', '-').slice(0, 15);
const numQueues = args.queues || 1;
const queueIds = numQueues === 1
  ? [`QUE-${timestamp}`]
  : Array.from({length: numQueues}, (_, i) => `QUE-${timestamp}-${i + 1}`);
```
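
For illustration (hypothetical clock): at 2025-12-27T14:30:00Z with `--queues 3`, this yields `queueIds === ["QUE-20251227-143000-1", "QUE-20251227-143000-2", "QUE-20251227-143000-3"]`.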

**Agent Prompt** (same for each queue, with assigned solutions):
```
## Order Solutions into Execution Queue

**Queue ID**: ${queueId}
**Solutions**: ${solutions.length} from ${issues.length} issues
**Project Root**: ${cwd}
**Queue Index**: ${queueIndex} of ${numQueues}

### Input
${JSON.stringify(solutions)}
// Each object: { issue_id, solution_id, task_count, files_touched[], priority }

### Workflow

Step 1: Build dependency graph from solutions (nodes=solutions, edges=file conflicts via files_touched)
Step 2: Use Gemini CLI for conflict analysis (5 types: file, API, data, dependency, architecture)
Step 3: For high-severity conflicts without clear resolution → add to `clarifications`
Step 4: Calculate semantic priority (base from issue priority + task_count boost)
Step 5: Assign execution groups: P* (parallel, no overlaps) / S* (sequential, shared files)
Step 6: Write queue JSON + update index

### Output Requirements

**Write files** (exactly 2):
- `.workflow/issues/queues/${queueId}.json` - Full queue with solutions, conflicts, groups
- `.workflow/issues/queues/index.json` - Update with new queue entry

**Return JSON**:
\`\`\`json
{
  "queue_id": "${queueId}",
  "total_solutions": N,
  "total_tasks": N,
  "execution_groups": [{"id": "P1", "type": "parallel", "count": N}],
  "issues_queued": ["ISS-xxx"],
  "clarifications": [{"conflict_id": "CFT-1", "question": "...", "options": [...]}]
}
\`\`\`

### Rules
- Solution granularity (NOT individual tasks)
- Queue Item ID format: S-1, S-2, S-3, ...
- Use provided Queue ID (do NOT generate new)
- `clarifications` only present if high-severity unresolved conflicts exist
- Use `files_touched` from input (already extracted by orchestrator)

### Done Criteria
- [ ] Queue JSON written with all solutions ordered
- [ ] Index updated with active_queue_id
- [ ] No circular dependencies
- [ ] Parallel groups have no file overlaps
- [ ] Return JSON matches required shape
```

**Launch Agents** (parallel if multi-queue):
```javascript
const numQueues = args.queues || 1;

if (numQueues === 1) {
  // Single queue: single agent call
  const result = Task(
    subagent_type="issue-queue-agent",
    prompt=buildPrompt(queueIds[0], solutions),
    description=`Order ${solutions.length} solutions`
  );
} else {
  // Multi-queue: parallel agent calls (single message with N Task calls)
  const agentPromises = solutionGroups.map((group, i) =>
    Task(
      subagent_type="issue-queue-agent",
      prompt=buildPrompt(queueIds[i], group, i + 1, numQueues),
      description=`Queue ${i + 1}/${numQueues}: ${group.length} solutions`
    )
  );
  // All agents launched in parallel via single message with multiple Task tool calls
}
```
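
`buildPrompt` above is assumed to interpolate the agent prompt template; a minimal sketch (the signature is inferred from the two call sites, not a documented helper):

```javascript
// Hypothetical: fills the "Agent Prompt" template shown earlier
function buildPrompt(queueId, solutions, queueIndex = 1, numQueues = 1) {
  return `## Order Solutions into Execution Queue

**Queue ID**: ${queueId}
**Solutions**: ${solutions.length}
**Queue Index**: ${queueIndex} of ${numQueues}

### Input
${JSON.stringify(solutions)}
`; // ...followed by the Workflow / Output Requirements / Rules sections verbatim
}
```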

**Multi-Queue Index Update:**
- First queue sets `active_queue_id`
- All queues added to `queues` array with `queue_group` field linking them

### Phase 5: Conflict Clarification

**Collect Agent Results** (multi-queue):
```javascript
// Collect clarifications from all agents
const allClarifications = results.flatMap((r, i) =>
  (r.clarifications || []).map(c => ({ ...c, queue_id: queueIds[i], agent_id: agentIds[i] }))
);
```
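
`results` and `agentIds` are assumed to be collected as the Phase 2-4 agents finish; a hedged sketch of that step, mirroring the TaskOutput convention used in /issue:plan:

```javascript
// Hypothetical collection step: one parsed JSON result per launched agent
const agentIds = launchedTaskIds; // task ids returned by the Task() calls above (assumption)
const results = agentIds.map(id => JSON.parse(TaskOutput(task_id=id, block=true)));
```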

**Check Agent Return:**
- Parse agent result JSON (or all results if multi-queue)
- If any `clarifications` array exists and is non-empty → user decision required

**Clarification Flow:**
```javascript
if (allClarifications.length > 0) {
  for (const clarification of allClarifications) {
    // Present to user via AskUserQuestion
    const answer = AskUserQuestion({
      questions: [{
        question: `[${clarification.queue_id}] ${clarification.question}`,
        header: clarification.conflict_id,
        options: clarification.options,
        multiSelect: false
      }]
    });

    // Resume respective agent with user decision
    Task(
      subagent_type="issue-queue-agent",
      resume=clarification.agent_id,
      prompt=`Conflict ${clarification.conflict_id} resolved: ${answer.selected}`
    );
  }
}
```

### Phase 6: Status Update & Summary

**Status Update** (MUST use CLI command, NOT direct file operations):

```bash
# Option 1: Batch update from queue (recommended)
ccw issue update --from-queue [queue-id] --json
ccw issue update --from-queue --json           # Use active queue
ccw issue update --from-queue QUE-xxx --json   # Use specific queue

# Option 2: Individual issue update
ccw issue update <issue-id> --status queued
```

**⚠️ IMPORTANT**: Do NOT directly modify `issues.jsonl`. Always use the CLI command to ensure proper validation and history tracking.

**Output** (JSON):
```json
{
  "success": true,
  "queue_id": "QUE-xxx",
  "queued": ["ISS-001", "ISS-002"],
  "queued_count": 2,
  "unplanned": ["ISS-003"],
  "unplanned_count": 1
}
```

**Behavior:**
- Updates issues in queue to `status: 'queued'` (skips already queued/executing/completed)
- Identifies planned issues with `bound_solution_id` NOT in queue → `unplanned` array
- Optional `queue-id`: defaults to active queue if omitted
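
For illustration, the `unplanned` array could be derived like this (a hedged re-implementation of the CLI's behavior; field names follow the brief output and index schema documented here):

```javascript
const plannedIssues = JSON.parse(Bash(`ccw issue list --status planned --brief`) || '[]');
const queuedIds = new Set(queue.issue_ids); // `queue` = active queue entry from the index (assumption)
const unplanned = plannedIssues
  .filter(i => i.bound_solution_id && !queuedIds.has(i.id))
  .map(i => i.id);
```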

**Summary Output:**
- Display queue ID, solution count, task count
- Show unplanned issues (planned but NOT in queue)
- Show next step: `/issue:execute`

### Phase 7: Active Queue Check & Decision

**After the agent completes Phases 1-6, check for an active queue:**

```bash
ccw issue queue list --brief
```

**Decision:**
- If `active_queue_id` is null → `ccw issue queue switch <new-queue-id>` (activate new queue)
- If active queue exists → Use **AskUserQuestion** to prompt user

**AskUserQuestion:**
```javascript
AskUserQuestion({
  questions: [{
    question: "Active queue exists. How would you like to proceed?",
    header: "Queue Action",
    options: [
      { label: "Merge into existing queue", description: "Add new items to active queue, delete new queue" },
      { label: "Use new queue", description: "Switch to new queue, keep existing in history" },
      { label: "Cancel", description: "Delete new queue, keep existing active" }
    ],
    multiSelect: false
  }]
})
```

**Action Commands:**

| User Choice | Commands |
|-------------|----------|
| **Merge into existing** | `ccw issue queue merge <new-queue-id> --queue <active-queue-id>` then `ccw issue queue delete <new-queue-id>` |
| **Use new queue** | `ccw issue queue switch <new-queue-id>` |
| **Cancel** | `ccw issue queue delete <new-queue-id>` |
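
A minimal dispatch sketch tying the chosen option to the commands in the table (variable names are illustrative):

```javascript
// Map the user's Phase 7 choice onto the documented CLI commands
const actions = {
  "Merge into existing queue": () => {
    Bash(`ccw issue queue merge ${newQueueId} --queue ${activeQueueId}`);
    Bash(`ccw issue queue delete ${newQueueId}`);
  },
  "Use new queue": () => Bash(`ccw issue queue switch ${newQueueId}`),
  "Cancel": () => Bash(`ccw issue queue delete ${newQueueId}`)
};
(actions[choice] || (() => {}))();
```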

## Storage Structure (Queue History)

```
.workflow/issues/
├── issues.jsonl              # All issues (one per line)
├── queues/                   # Queue history directory
│   ├── index.json            # Queue index (active + history)
│   ├── {queue-id}.json       # Individual queue files
│   └── ...
└── solutions/
    ├── {issue-id}.jsonl      # Solutions for issue
    └── ...
```

### Queue Index Schema

```json
{
  "active_queue_id": "QUE-20251227-143000-1",
  "active_queue_group": "QGR-20251227-143000",
  "queues": [
    {
      "id": "QUE-20251227-143000-1",
      "queue_group": "QGR-20251227-143000",
      "queue_index": 1,
      "total_queues": 3,
      "status": "active",
      "issue_ids": ["ISS-xxx", "ISS-yyy"],
      "total_solutions": 3,
      "completed_solutions": 1,
      "created_at": "2025-12-27T14:30:00Z"
    }
  ]
}
```

**Multi-Queue Fields:**
- `queue_group`: Links queues created in same batch (format: `QGR-{timestamp}`)
- `queue_index`: Position in group (1-based)
- `total_queues`: Total queues in group
- `active_queue_group`: Current active group (for multi-queue execution)

**Note**: Queue file schema is produced by `issue-queue-agent`. See agent documentation for details.

## Error Handling

| Error | Resolution |
|-------|------------|
| No bound solutions | Display message, suggest /issue:plan |
| Circular dependency | List cycles, abort queue formation |
| High-severity conflict | Return `clarifications`, prompt user decision |
| User cancels clarification | Abort queue formation |
| **index.json not updated** | Auto-fix: Set active_queue_id to new queue |
| **Queue file missing solutions** | Abort with error, agent must regenerate |
| **User cancels queue add** | Display message, return without changes |
| **Merge with empty source** | Skip merge, display warning |
| **All items duplicate** | Skip merge, display "All items already exist" |

## Quality Checklist

Before completing, verify:

- [ ] All planned issues with `bound_solution_id` are included
- [ ] Queue JSON written to `queues/{queue-id}.json` (N files if multi-queue)
- [ ] Index updated in `queues/index.json` with `active_queue_id`
- [ ] Multi-queue: All queues share same `queue_group`
- [ ] No circular dependencies in solution DAG
- [ ] All conflicts resolved (auto or via user clarification)
- [ ] Parallel groups have no file overlaps
- [ ] Cross-queue: No file overlaps between queues
- [ ] Issue statuses updated to `queued`
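
One way to verify the two file-overlap items above, as a hedged sketch over the per-queue solution groups:

```javascript
// True when no file is touched by more than one group (covers the cross-queue item);
// run the same check over a queue's execution groups for the parallel-group item
function noFileOverlap(groups) {
  const seen = new Set();
  for (const group of groups) {
    for (const f of new Set(group.flatMap(sol => sol.files_touched))) {
      if (seen.has(f)) return false;
      seen.add(f);
    }
  }
  return true;
}
```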

## Related Commands

- `/issue:execute` - Execute queue with codex
- `ccw issue queue list` - View current queue
- `ccw issue update --from-queue [queue-id]` - Sync issue statuses from queue
@@ -1,764 +0,0 @@
---
name: code-map-memory
description: "3-phase orchestrator: parse feature keyword → cli-explore-agent analyzes (Deep Scan dual-source) → orchestrator generates Mermaid docs + SKILL package (skips phase 2 if exists)"
argument-hint: "\"feature-keyword\" [--regenerate] [--tool <gemini|qwen>]"
allowed-tools: SlashCommand(*), TodoWrite(*), Bash(*), Read(*), Write(*), Task(*)
---

# Code Flow Mapping Generator

## Overview

**Pure Orchestrator with Agent Delegation**: Prepares context paths and delegates code flow analysis to the specialized cli-explore-agent. The orchestrator transforms the agent's JSON analysis into Mermaid documentation.

**Auto-Continue Workflow**: Runs fully autonomously once triggered. Each phase completes and automatically triggers the next phase.

**Execution Paths**:
- **Full Path**: All 3 phases (no existing codemap OR `--regenerate` specified)
- **Skip Path**: Phase 1 → Phase 3 (existing codemap found AND no `--regenerate` flag)
- **Phase 3 Always Executes**: SKILL index is always generated or updated

**Agent Responsibility** (cli-explore-agent):
- Deep code flow analysis using dual-source strategy (Bash + Gemini CLI)
- Returns structured JSON with architecture, functions, data flow, conditionals, patterns
- NO file writing - analysis only

**Orchestrator Responsibility**:
- Provides feature keyword and analysis scope to agent
- Transforms agent's JSON into Mermaid-enriched markdown documentation
- Writes all files (5 docs + metadata.json + SKILL.md)

## Core Rules

1. **Start Immediately**: First action is TodoWrite initialization, second action is Phase 1 execution
2. **Feature-Specific SKILL**: Each feature creates an independent `.claude/skills/codemap-{feature}/` package
3. **Specialized Agent**: Phase 2a uses cli-explore-agent for professional code analysis (Deep Scan mode)
4. **Orchestrator Documentation**: Phase 2b transforms agent JSON into Mermaid markdown files
5. **Auto-Continue**: After completing each phase, update TodoWrite and immediately execute the next phase
6. **No User Prompts**: Never ask user questions or wait for input between phases
7. **Track Progress**: Update TodoWrite after EVERY phase completion before starting the next phase
8. **Multi-Level Detail**: Generate 4 levels: architecture → function → data → conditional

---

## 3-Phase Execution

### Phase 1: Parse Feature Keyword & Check Existing

**Goal**: Normalize the feature keyword, check for an existing codemap, prepare for analysis

**Step 1: Parse Feature Keyword**
```bash
# Get feature keyword from argument
FEATURE_KEYWORD="$1"

# Normalize: lowercase, spaces and underscores to hyphens
normalized_feature=$(echo "$FEATURE_KEYWORD" | tr '[:upper:]' '[:lower:]' | tr ' ' '-' | tr '_' '-')

# Example: "User Authentication" → "user-authentication"
# Example: "支付处理" → "支付处理" (keep non-ASCII)
```

**Step 2: Set Tool Preference**
```bash
# Default to gemini unless --tool specified
TOOL="${tool_flag:-gemini}"
```

**Step 3: Check Existing Codemap**
```bash
# Define codemap directory
CODEMAP_DIR=".claude/skills/codemap-${normalized_feature}"

# Check if codemap exists
bash(test -d "$CODEMAP_DIR" && echo "exists" || echo "not_exists")

# Count existing files
bash(find "$CODEMAP_DIR" -name "*.md" 2>/dev/null | wc -l || echo 0)
```

**Step 4: Skip Decision**
```javascript
if (existing_files > 0 && !regenerate_flag) {
  SKIP_GENERATION = true
  message = "Codemap already exists, skipping Phase 2. Use --regenerate to force regeneration."
} else if (regenerate_flag) {
  bash(rm -rf "$CODEMAP_DIR")
  SKIP_GENERATION = false
  message = "Regenerating codemap from scratch."
} else {
  SKIP_GENERATION = false
  message = "No existing codemap found, generating new code flow analysis."
}
```

**Output Variables**:
- `FEATURE_KEYWORD`: Original feature keyword
- `normalized_feature`: Normalized feature name for directory
- `CODEMAP_DIR`: `.claude/skills/codemap-{feature}`
- `TOOL`: CLI tool to use (gemini or qwen)
- `SKIP_GENERATION`: Boolean - whether to skip Phase 2

**TodoWrite**:
- If skipping: Mark phase 1 completed, phase 2 completed, phase 3 in_progress
- If not skipping: Mark phase 1 completed, phase 2 in_progress

---

### Phase 2: Code Flow Analysis & Documentation Generation

**Skip Condition**: Skipped if `SKIP_GENERATION = true`

**Goal**: Use cli-explore-agent for professional code analysis, then the orchestrator generates Mermaid documentation

**Architecture**: Phase 2a (Agent Analysis) → Phase 2b (Orchestrator Documentation)

---

#### Phase 2a: cli-explore-agent Analysis

**Purpose**: Leverage the specialized cli-explore-agent for deep code flow analysis

**Agent Task Specification**:

```
Task(
  subagent_type: "cli-explore-agent",
  description: "Analyze code flow: {FEATURE_KEYWORD}",
  prompt: "
    Perform Deep Scan analysis for feature: {FEATURE_KEYWORD}

    **Analysis Mode**: deep-scan (Dual-source: Bash structural scan + Gemini semantic analysis)

    **Analysis Objectives**:
    1. **Module Architecture**: Identify high-level module organization, interactions, and entry points
    2. **Function Call Chains**: Trace execution paths, call sequences, and parameter flows
    3. **Data Transformations**: Map data structure changes and transformation stages
    4. **Conditional Paths**: Document decision trees, branches, and error handling strategies
    5. **Design Patterns**: Discover architectural patterns and extract design intent

    **Scope**:
    - Feature: {FEATURE_KEYWORD}
    - CLI Tool: {TOOL} (gemini-2.5-pro or qwen coder-model)
    - File Discovery: MCP Code Index (preferred) + rg fallback
    - Target: 5-15 most relevant files

    **Expected Output Format**:
    Return comprehensive analysis as structured JSON:
    {
      \"feature\": \"{FEATURE_KEYWORD}\",
      \"analysis_metadata\": {
        \"tool_used\": \"gemini|qwen\",
        \"timestamp\": \"ISO_TIMESTAMP\",
        \"analysis_mode\": \"deep-scan\"
      },
      \"files_analyzed\": [
        {\"file\": \"path/to/file.ts\", \"relevance\": \"high|medium|low\", \"role\": \"brief description\"}
      ],
      \"architecture\": {
        \"overview\": \"High-level description\",
        \"modules\": [
          {\"name\": \"ModuleName\", \"file\": \"file:line\", \"responsibility\": \"description\", \"dependencies\": [...]}
        ],
        \"interactions\": [
          {\"from\": \"ModuleA\", \"to\": \"ModuleB\", \"type\": \"import|call|data-flow\", \"description\": \"...\"}
        ],
        \"entry_points\": [
          {\"function\": \"main\", \"file\": \"file:line\", \"description\": \"...\"}
        ]
      },
      \"function_calls\": {
        \"call_chains\": [
          {
            \"chain_id\": 1,
            \"description\": \"User authentication flow\",
            \"sequence\": [
              {\"function\": \"login\", \"file\": \"file:line\", \"calls\": [\"validateCredentials\", \"createSession\"]}
            ]
          }
        ],
        \"sequences\": [
          {\"from\": \"Client\", \"to\": \"AuthService\", \"method\": \"login(username, password)\", \"returns\": \"Session\"}
        ]
      },
      \"data_flow\": {
        \"structures\": [
          {\"name\": \"UserData\", \"stage\": \"input\", \"shape\": {\"username\": \"string\", \"password\": \"string\"}}
        ],
        \"transformations\": [
          {\"from\": \"RawInput\", \"to\": \"ValidatedData\", \"transformer\": \"validateUser\", \"file\": \"file:line\"}
        ]
      },
      \"conditional_logic\": {
        \"branches\": [
          {\"condition\": \"isAuthenticated\", \"file\": \"file:line\", \"true_path\": \"...\", \"false_path\": \"...\"}
        ],
        \"error_handling\": [
          {\"error_type\": \"AuthenticationError\", \"handler\": \"handleAuthError\", \"file\": \"file:line\", \"recovery\": \"retry|fail\"}
        ]
      },
      \"design_patterns\": [
        {\"pattern\": \"Repository Pattern\", \"location\": \"src/repositories\", \"description\": \"...\"}
      ],
      \"recommendations\": [
        \"Consider extracting authentication logic into separate module\",
        \"Add error recovery for network failures\"
      ]
    }

    **Critical Requirements**:
    - Use Deep Scan mode: Bash (Phase 1 - precise locations) + Gemini CLI (Phase 2 - semantic understanding) + Synthesis (Phase 3 - merge with attribution)
    - Focus exclusively on {FEATURE_KEYWORD} feature flow
    - Include file:line references for ALL findings
    - Extract design intent from code structure and comments
    - NO FILE WRITING - return JSON analysis only
    - Handle tool failures gracefully (Gemini → Qwen fallback, MCP → rg fallback)
  "
)
```

**Agent Output**: JSON analysis result with architecture, functions, data flow, conditionals, and patterns

---

#### Phase 2b: Orchestrator Documentation Generation

**Purpose**: Transform cli-explore-agent JSON into Mermaid-enriched documentation

**Input**: Agent's JSON analysis result

**Process**:

1. **Parse Agent Analysis**:
```javascript
const analysis = JSON.parse(agentResult)
const { feature, files_analyzed, architecture, function_calls, data_flow, conditional_logic, design_patterns } = analysis
```

2. **Generate Mermaid Diagrams from Structured Data**:

**a) architecture-flow.md** (~3K tokens):
```javascript
// Convert architecture.modules + architecture.interactions → Mermaid graph TD
const architectureMermaid = `
graph TD
${architecture.modules.map(m => ` ${m.name}[${m.name}]`).join('\n')}
${architecture.interactions.map(i => ` ${i.from} -->|${i.type}| ${i.to}`).join('\n')}
`

Write({
  file_path: `${CODEMAP_DIR}/architecture-flow.md`,
  content: `---
feature: ${feature}
level: architecture
detail: high-level module interactions
---
# Architecture Flow: ${feature}

## Overview
${architecture.overview}

## Module Architecture
${architecture.modules.map(m => `### ${m.name}\n- **File**: ${m.file}\n- **Role**: ${m.responsibility}\n- **Dependencies**: ${m.dependencies.join(', ')}`).join('\n\n')}

## Flow Diagram
\`\`\`mermaid
${architectureMermaid}
\`\`\`

## Key Interactions
${architecture.interactions.map(i => `- **${i.from} → ${i.to}**: ${i.description}`).join('\n')}

## Entry Points
${architecture.entry_points.map(e => `- **${e.function}** (${e.file}): ${e.description}`).join('\n')}
`
})
```

**b) function-calls.md** (~5K tokens):
```javascript
// Convert function_calls.sequences → Mermaid sequenceDiagram
const sequenceMermaid = `
sequenceDiagram
${function_calls.sequences.map(s => ` ${s.from}->>${s.to}: ${s.method}`).join('\n')}
`

Write({
  file_path: `${CODEMAP_DIR}/function-calls.md`,
  content: `---
feature: ${feature}
level: function
detail: function-level call sequences
---
# Function Call Chains: ${feature}

## Call Sequence Diagram
\`\`\`mermaid
${sequenceMermaid}
\`\`\`

## Detailed Call Chains
${function_calls.call_chains.map(chain => `
### Chain ${chain.chain_id}: ${chain.description}
${chain.sequence.map(fn => `- **${fn.function}** (${fn.file})\n - Calls: ${fn.calls.join(', ')}`).join('\n')}
`).join('\n')}

## Parameters & Returns
${function_calls.sequences.map(s => `- **${s.method}** → Returns: ${s.returns || 'void'}`).join('\n')}
`
})
```

**c) data-flow.md** (~4K tokens):
```javascript
// Convert data_flow.transformations → Mermaid flowchart LR
const dataFlowMermaid = `
flowchart LR
${data_flow.transformations.map((t, i) => ` Stage${i}[${t.from}] -->|${t.transformer}| Stage${i+1}[${t.to}]`).join('\n')}
`

Write({
  file_path: `${CODEMAP_DIR}/data-flow.md`,
  content: `---
feature: ${feature}
level: data
detail: data structure transformations
---
# Data Flow: ${feature}

## Data Transformation Diagram
\`\`\`mermaid
${dataFlowMermaid}
\`\`\`

## Data Structures
${data_flow.structures.map(s => `### ${s.name} (${s.stage})\n\`\`\`json\n${JSON.stringify(s.shape, null, 2)}\n\`\`\``).join('\n\n')}

## Transformations
${data_flow.transformations.map(t => `- **${t.from} → ${t.to}** via \`${t.transformer}\` (${t.file})`).join('\n')}
`
})
```

**d) conditional-paths.md** (~4K tokens):
```javascript
// Convert conditional_logic.branches → Mermaid flowchart TD
const conditionalMermaid = `
flowchart TD
 Start[Entry Point]
${conditional_logic.branches.map((b, i) => `
 Start --> Check${i}{${b.condition}}
 Check${i} -->|Yes| Path${i}A[${b.true_path}]
 Check${i} -->|No| Path${i}B[${b.false_path}]
`).join('\n')}
`

Write({
  file_path: `${CODEMAP_DIR}/conditional-paths.md`,
  content: `---
feature: ${feature}
level: conditional
detail: decision trees and error paths
---
# Conditional Paths: ${feature}

## Decision Tree
\`\`\`mermaid
${conditionalMermaid}
\`\`\`

## Branch Conditions
${conditional_logic.branches.map(b => `- **${b.condition}** (${b.file})\n - True: ${b.true_path}\n - False: ${b.false_path}`).join('\n')}

## Error Handling
${conditional_logic.error_handling.map(e => `- **${e.error_type}**: Handler \`${e.handler}\` (${e.file}) - Recovery: ${e.recovery}`).join('\n')}
`
})
```

**e) complete-flow.md** (~8K tokens):
```javascript
// Integrate all Mermaid diagrams
Write({
  file_path: `${CODEMAP_DIR}/complete-flow.md`,
  content: `---
feature: ${feature}
level: complete
detail: integrated multi-level view
---
# Complete Flow: ${feature}

## Integrated Flow Diagram
\`\`\`mermaid
graph TB
 subgraph Architecture
${architecture.modules.map(m => ` ${m.name}[${m.name}]`).join('\n')}
 end

 subgraph "Function Calls"
${function_calls.call_chains[0]?.sequence.map(fn => ` ${fn.function}`).join('\n') || ''}
 end

 subgraph "Data Flow"
${data_flow.structures.map(s => ` ${s.name}[${s.name}]`).join('\n')}
 end
\`\`\`

## Complete Trace
[Comprehensive end-to-end documentation combining all analysis layers]

## Design Patterns Identified
${design_patterns.map(p => `- **${p.pattern}** in ${p.location}: ${p.description}`).join('\n')}

## Recommendations
${analysis.recommendations.map(r => `- ${r}`).join('\n')}

## Cross-References
- [Architecture Flow](./architecture-flow.md) - High-level module structure
- [Function Calls](./function-calls.md) - Detailed call chains
- [Data Flow](./data-flow.md) - Data transformation stages
- [Conditional Paths](./conditional-paths.md) - Decision trees and error handling
`
})
```

3. **Write metadata.json**:
```javascript
Write({
  file_path: `${CODEMAP_DIR}/metadata.json`,
  content: JSON.stringify({
    feature: feature,
    normalized_name: normalized_feature,
    generated_at: new Date().toISOString(),
    tool_used: analysis.analysis_metadata.tool_used,
    files_analyzed: files_analyzed.map(f => f.file),
    analysis_summary: {
      total_files: files_analyzed.length,
      modules_traced: architecture.modules.length,
      functions_traced: function_calls.call_chains.reduce((sum, c) => sum + c.sequence.length, 0),
      patterns_discovered: design_patterns.length
    }
  }, null, 2)
})
```

4. **Report Phase 2 Completion**:
```
Phase 2 Complete: Code flow analysis and documentation generated

- Agent Analysis: cli-explore-agent with {TOOL}
- Files Analyzed: {count}
- Documentation Generated: 5 markdown files + metadata.json
- Location: {CODEMAP_DIR}
```

**Completion Criteria**:
- cli-explore-agent task completed successfully with JSON result
- 5 documentation files written with valid Mermaid diagrams
- metadata.json written with analysis summary
- All files properly formatted and cross-referenced

**TodoWrite**: Mark phase 2 completed, phase 3 in_progress

---

### Phase 3: Generate SKILL.md Index

**Note**: This phase **ALWAYS executes** - it generates or updates the SKILL index.

**Goal**: Read the generated flow documentation and create a SKILL.md index with progressive loading

**Steps**:

1. **Verify Generated Files**:
```bash
bash(find "{CODEMAP_DIR}" -name "*.md" -type f | sort)
```

2. **Read metadata.json**:
```javascript
Read({CODEMAP_DIR}/metadata.json)
// Extract: feature, normalized_name, files_analyzed, analysis_summary
```

3. **Read File Headers** (optional, first 30 lines):
```javascript
Read({CODEMAP_DIR}/architecture-flow.md, limit: 30)
Read({CODEMAP_DIR}/function-calls.md, limit: 30)
// Extract overview and diagram counts
```

4. **Generate SKILL.md Index**:

Template structure:
```yaml
---
name: codemap-{normalized_feature}
description: Code flow mapping for {FEATURE_KEYWORD} feature (located at {project_path}). Load this SKILL when analyzing, tracing, or understanding {FEATURE_KEYWORD} execution flow, especially when no relevant context exists in memory.
version: 1.0.0
generated_at: {ISO_TIMESTAMP}
---
# Code Flow Map: {FEATURE_KEYWORD}

## Feature: `{FEATURE_KEYWORD}`

**Analysis Date**: {DATE}
**Tool Used**: {TOOL}
**Files Analyzed**: {COUNT}

## Progressive Loading

### Level 0: Quick Overview (~2K tokens)
- [Architecture Flow](./architecture-flow.md) - High-level module interactions

### Level 1: Core Flows (~10K tokens)
- [Architecture Flow](./architecture-flow.md) - Module architecture
- [Function Calls](./function-calls.md) - Function call chains

### Level 2: Complete Analysis (~20K tokens)
- [Architecture Flow](./architecture-flow.md)
- [Function Calls](./function-calls.md)
- [Data Flow](./data-flow.md) - Data transformations

### Level 3: Deep Dive (~30K tokens)
- [Architecture Flow](./architecture-flow.md)
- [Function Calls](./function-calls.md)
- [Data Flow](./data-flow.md)
- [Conditional Paths](./conditional-paths.md) - Branches and error handling
- [Complete Flow](./complete-flow.md) - Integrated comprehensive view

## Usage

Load this SKILL package when:
- Analyzing {FEATURE_KEYWORD} implementation
- Tracing execution flow for debugging
- Understanding code dependencies
- Planning refactoring or enhancements

## Analysis Summary

- **Modules Traced**: {modules_traced}
- **Functions Traced**: {functions_traced}
- **Files Analyzed**: {total_files}

## Mermaid Diagrams Included

- Architecture flow diagram (graph TD)
- Function call sequence diagram (sequenceDiagram)
- Data transformation flowchart (flowchart LR)
- Conditional decision tree (flowchart TD)
- Complete integrated diagram (graph TB)
```

5. **Write SKILL.md**:
```javascript
Write({
  file_path: `{CODEMAP_DIR}/SKILL.md`,
  content: generatedIndexMarkdown
})
```

**Completion Criteria**:
- SKILL.md index written
- All documentation files verified
- Progressive loading levels (0-3) properly structured
- Mermaid diagram references included

**TodoWrite**: Mark phase 3 completed

**Final Report**:
```
Code Flow Mapping Complete

Feature: {FEATURE_KEYWORD}
Location: .claude/skills/codemap-{normalized_feature}/

Files Generated:
- SKILL.md (index)
- architecture-flow.md (with Mermaid diagram)
- function-calls.md (with Mermaid sequence diagram)
- data-flow.md (with Mermaid flowchart)
- conditional-paths.md (with Mermaid decision tree)
- complete-flow.md (with integrated Mermaid diagram)
- metadata.json

Analysis:
- Files analyzed: {count}
- Modules traced: {count}
- Functions traced: {count}

Usage: Skill(command: "codemap-{normalized_feature}")
```

---

## Implementation Details

### TodoWrite Patterns

**Initialization** (Before Phase 1):
```javascript
TodoWrite({todos: [
  {"content": "Parse feature keyword and check existing", "status": "in_progress", "activeForm": "Parsing feature keyword"},
  {"content": "Agent analyzes code flow and generates files", "status": "pending", "activeForm": "Analyzing code flow"},
  {"content": "Generate SKILL.md index", "status": "pending", "activeForm": "Generating SKILL index"}
]})
```

**Full Path** (SKIP_GENERATION = false):
```javascript
// After Phase 1
TodoWrite({todos: [
  {"content": "Parse feature keyword and check existing", "status": "completed", ...},
  {"content": "Agent analyzes code flow and generates files", "status": "in_progress", ...},
  {"content": "Generate SKILL.md index", "status": "pending", ...}
]})

// After Phase 2
TodoWrite({todos: [
  {"content": "Parse feature keyword and check existing", "status": "completed", ...},
  {"content": "Agent analyzes code flow and generates files", "status": "completed", ...},
  {"content": "Generate SKILL.md index", "status": "in_progress", ...}
]})

// After Phase 3
TodoWrite({todos: [
  {"content": "Parse feature keyword and check existing", "status": "completed", ...},
  {"content": "Agent analyzes code flow and generates files", "status": "completed", ...},
  {"content": "Generate SKILL.md index", "status": "completed", ...}
]})
```

**Skip Path** (SKIP_GENERATION = true):
```javascript
// After Phase 1 (skip Phase 2)
TodoWrite({todos: [
  {"content": "Parse feature keyword and check existing", "status": "completed", ...},
  {"content": "Agent analyzes code flow and generates files", "status": "completed", ...}, // Skipped
  {"content": "Generate SKILL.md index", "status": "in_progress", ...}
]})
```

### Execution Flow

**Full Path**:
```
User → TodoWrite Init → Phase 1 (parse) → Phase 2 (agent analyzes) → Phase 3 (write index) → Report
```

**Skip Path**:
```
User → TodoWrite Init → Phase 1 (detect existing) → Phase 3 (update index) → Report
```

### Error Handling

**Phase 1 Errors**:
- Empty feature keyword: Report error, ask user to provide feature description
- Invalid characters: Normalize and continue

**Phase 2 Errors (Agent)**:
- Agent task fails: Retry once, report if it fails again
- No files discovered: Warn user, ask for a more specific feature keyword
- CLI failures: Agent handles internally with retries
- Invalid Mermaid syntax: Agent validates before writing

**Phase 3 Errors**:
- Write failures: Report which files failed
- Missing files: Note in SKILL.md, suggest regeneration

---

## Parameters

```bash
/memory:code-map-memory "feature-keyword" [--regenerate] [--tool <gemini|qwen>]
```

**Arguments**:
- **"feature-keyword"**: Feature or flow to analyze (required)
  - Examples: `"user authentication"`, `"payment processing"`, `"数据导入流程"`
  - Can be English, Chinese, or mixed
  - Spaces and underscores normalized to hyphens
- **--regenerate**: Force regeneration of an existing codemap (deletes and recreates)
- **--tool**: CLI tool for analysis (default: gemini)
  - `gemini`: Comprehensive flow analysis with gemini-2.5-pro
  - `qwen`: Alternative with coder-model

---

## Examples

**Generated File Structure** (for all examples):
```
.claude/skills/codemap-{feature}/
├── SKILL.md               # Index (Phase 3)
├── architecture-flow.md   # Agent (Phase 2) - High-level flow
├── function-calls.md      # Agent (Phase 2) - Function chains
├── data-flow.md           # Agent (Phase 2) - Data transformations
├── conditional-paths.md   # Agent (Phase 2) - Branches & errors
├── complete-flow.md       # Agent (Phase 2) - Integrated view
└── metadata.json          # Agent (Phase 2)
```

### Example 1: User Authentication Flow

```bash
/memory:code-map-memory "user authentication"
```

**Workflow**:
1. Phase 1: Normalizes to "user-authentication", checks for an existing codemap
2. Phase 2: Agent discovers auth-related files, executes CLI analysis, generates 5 flow docs with Mermaid
3. Phase 3: Generates SKILL.md index with progressive loading

**Output**: `.claude/skills/codemap-user-authentication/` with 6 files + metadata

### Example 2: Regenerate with Qwen

```bash
/memory:code-map-memory "payment processing" --regenerate --tool qwen
```

**Workflow**:
1. Phase 1: Deletes the existing codemap due to --regenerate
2. Phase 2: Agent uses qwen with coder-model for fresh analysis
3. Phase 3: Generates updated SKILL.md

---

## Benefits

- **Per-Feature SKILL**: Independent packages for each analyzed feature
- **Specialized Agent**: cli-explore-agent with Deep Scan mode (Bash + Gemini dual-source)
- **Professional Analysis**: Pre-defined workflow for code exploration and structure analysis
- **Clear Separation**: Agent analyzes (JSON) → Orchestrator documents (Mermaid markdown)
- **Multi-Level Detail**: 4 levels (architecture → function → data → conditional)
- **Visual Flow**: Embedded Mermaid diagrams for all flow types
- **Progressive Loading**: Token-efficient context loading (2K → 30K)
- **Auto-Continue**: Fully autonomous 3-phase execution
- **Smart Skip**: Detects existing codemap, 10x faster index updates
- **CLI Integration**: Gemini/Qwen for deep semantic understanding

## Architecture

```
code-map-memory (orchestrator)
├─ Phase 1: Parse & Check (bash commands, skip decision)
├─ Phase 2: Code Analysis & Documentation (skippable)
│  ├─ Phase 2a: cli-explore-agent Analysis
│  │  └─ Deep Scan: Bash structural + Gemini semantic → JSON
│  └─ Phase 2b: Orchestrator Documentation
│     └─ Transform JSON → 5 Mermaid markdown files + metadata.json
└─ Phase 3: Write SKILL.md (index generation, always runs)

Benefits:
✅ Specialized agent: cli-explore-agent with dual-source strategy (Bash + Gemini)
✅ Professional analysis: Pre-defined Deep Scan workflow
✅ Clear separation: Agent analyzes (JSON) → Orchestrator documents (Mermaid)
✅ Smart skip logic: 10x faster when codemap exists
✅ Multi-level detail: Architecture → Functions → Data → Conditionals

Output: .claude/skills/codemap-{feature}/
```
383
.claude/commands/memory/compact.md
Normal file
@@ -0,0 +1,383 @@

---
name: compact
description: Compact current session memory into structured text for session recovery, extracting objective/plan/files/decisions/constraints/state, and save via MCP core_memory tool
argument-hint: "[optional: session description]"
allowed-tools: mcp__ccw-tools__core_memory(*), Read(*)
examples:
  - /memory:compact
  - /memory:compact "completed core-memory module"
---

# Memory Compact Command (/memory:compact)

## 1. Overview

The `memory:compact` command **compresses current session working memory** into structured text optimized for **session recovery**, extracts critical information, and saves it to persistent storage via the MCP `core_memory` tool.

**Core Philosophy**:
- **Session Recovery First**: Capture everything needed to resume work seamlessly
- **Minimize Re-exploration**: Include file paths, decisions, and state to avoid redundant analysis
- **Preserve Train of Thought**: Keep notes and hypotheses for complex debugging
- **Actionable State**: Record last action result and known issues

## 2. Parameters

- `"session description"` (Optional): Session description to supplement the objective
  - Example: "completed core-memory module"
  - Example: "debugging JWT refresh - suspected memory leak"

## 3. Structured Output Format

```markdown
## Session ID
[WFS-ID if workflow session active, otherwise (none)]

## Project Root
[Absolute path to project root, e.g., D:\Claude_dms3]

## Objective
[High-level goal - the "North Star" of this session]

## Execution Plan
[CRITICAL: Embed the LATEST plan in its COMPLETE and DETAILED form]

### Source: [workflow | todo | user-stated | inferred]

<details>
<summary>Full Execution Plan (Click to expand)</summary>

[PRESERVE COMPLETE PLAN VERBATIM - DO NOT SUMMARIZE]
- ALL phases, tasks, subtasks
- ALL file paths (absolute)
- ALL dependencies and prerequisites
- ALL acceptance criteria
- ALL status markers ([x] done, [ ] pending)
- ALL notes and context

Example:
## Phase 1: Setup
- [x] Initialize project structure
  - Created D:\Claude_dms3\src\core\index.ts
  - Added dependencies: lodash, zod
- [ ] Configure TypeScript
  - Update tsconfig.json for strict mode

## Phase 2: Implementation
- [ ] Implement core API
  - Target: D:\Claude_dms3\src\api\handler.ts
  - Dependencies: Phase 1 complete
  - Acceptance: All tests pass

</details>

## Working Files (Modified)
[Absolute paths to actively modified files]
- D:\Claude_dms3\src\file1.ts (role: main implementation)
- D:\Claude_dms3\tests\file1.test.ts (role: unit tests)

## Reference Files (Read-Only)
[Absolute paths to context files - NOT modified but essential for understanding]
- D:\Claude_dms3\.claude\CLAUDE.md (role: project instructions)
- D:\Claude_dms3\src\types\index.ts (role: type definitions)
- D:\Claude_dms3\package.json (role: dependencies)

## Last Action
[Last significant action and its result/status]

## Decisions
- [Decision]: [Reasoning]
- [Decision]: [Reasoning]

## Constraints
- [User-specified limitation or preference]

## Dependencies
- [Added/changed packages or environment requirements]

## Known Issues
- [Deferred bug or edge case]

## Changes Made
- [Completed modification]

## Pending
- [Next step] or (none)

## Notes
[Unstructured thoughts, hypotheses, debugging trails]
```

## 4. Field Definitions

| Field | Purpose | Recovery Value |
|-------|---------|----------------|
| **Session ID** | Workflow session identifier (WFS-*) | Links memory to specific stateful task execution |
| **Project Root** | Absolute path to project directory | Enables correct path resolution in new sessions |
| **Objective** | Ultimate goal of the session | Prevents losing track of broader feature |
| **Execution Plan** | Complete plan from any source (verbatim) | Preserves full planning context, avoids re-planning |
| **Working Files** | Actively modified files (absolute paths) | Immediately identifies where work was happening |
| **Reference Files** | Read-only context files (absolute paths) | Eliminates re-exploration for critical context |
| **Last Action** | Final tool output/status | Immediate state awareness (success/failure) |
| **Decisions** | Architectural choices + reasoning | Prevents re-litigating settled decisions |
| **Constraints** | User-imposed limitations | Maintains personalized coding style |
| **Dependencies** | Package/environment changes | Prevents missing dependency errors |
| **Known Issues** | Deferred bugs/edge cases | Ensures issues aren't forgotten |
| **Changes Made** | Completed modifications | Clear record of what was done |
| **Pending** | Next steps | Immediate action items |
| **Notes** | Hypotheses, debugging trails | Preserves "train of thought" |

## 5. Execution Flow

### Step 1: Analyze Current Session

Extract the following from conversation history:

```javascript
const sessionAnalysis = {
  sessionId: "",       // WFS-* if workflow session active, null otherwise
  projectRoot: "",     // Absolute path: D:\Claude_dms3
  objective: "",       // High-level goal (1-2 sentences)
  executionPlan: {
    source: "workflow" | "todo" | "user-stated" | "inferred",
    content: ""        // Full plan content - ALWAYS preserve COMPLETE and DETAILED form
  },
  workingFiles: [],    // {absolutePath, role} - modified files
  referenceFiles: [],  // {absolutePath, role} - read-only context files
  lastAction: "",      // Last significant action + result
  decisions: [],       // {decision, reasoning}
  constraints: [],     // User-specified limitations
  dependencies: [],    // Added/changed packages
  knownIssues: [],     // Deferred bugs
  changesMade: [],     // Completed modifications
  pending: [],         // Next steps
  notes: ""            // Unstructured thoughts
}
```
|
||||
|
||||
### Step 2: Generate Structured Text
|
||||
|
||||
```javascript
|
||||
// Helper: Generate execution plan section
|
||||
const generateExecutionPlan = (plan) => {
|
||||
const sourceLabels = {
|
||||
'workflow': 'workflow (IMPL_PLAN.md)',
|
||||
'todo': 'todo (TodoWrite)',
|
||||
'user-stated': 'user-stated',
|
||||
'inferred': 'inferred'
|
||||
};
|
||||
|
||||
// CRITICAL: Preserve complete plan content verbatim - DO NOT summarize
|
||||
return `### Source: ${sourceLabels[plan.source] || plan.source}
|
||||
|
||||
<details>
|
||||
<summary>Full Execution Plan (Click to expand)</summary>
|
||||
|
||||
${plan.content}
|
||||
|
||||
</details>`;
|
||||
};
|
||||
|
||||
const structuredText = `## Session ID
|
||||
${sessionAnalysis.sessionId || '(none)'}
|
||||
|
||||
## Project Root
|
||||
${sessionAnalysis.projectRoot}
|
||||
|
||||
## Objective
|
||||
${sessionAnalysis.objective}
|
||||
|
||||
## Execution Plan
|
||||
${generateExecutionPlan(sessionAnalysis.executionPlan)}
|
||||
|
||||
## Working Files (Modified)
|
||||
${sessionAnalysis.workingFiles.map(f => `- ${f.absolutePath} (role: ${f.role})`).join('\n') || '(none)'}
|
||||
|
||||
## Reference Files (Read-Only)
|
||||
${sessionAnalysis.referenceFiles.map(f => `- ${f.absolutePath} (role: ${f.role})`).join('\n') || '(none)'}
|
||||
|
||||
## Last Action
|
||||
${sessionAnalysis.lastAction}
|
||||
|
||||
## Decisions
|
||||
${sessionAnalysis.decisions.map(d => `- ${d.decision}: ${d.reasoning}`).join('\n') || '(none)'}
|
||||
|
||||
## Constraints
|
||||
${sessionAnalysis.constraints.map(c => `- ${c}`).join('\n') || '(none)'}
|
||||
|
||||
## Dependencies
|
||||
${sessionAnalysis.dependencies.map(d => `- ${d}`).join('\n') || '(none)'}
|
||||
|
||||
## Known Issues
|
||||
${sessionAnalysis.knownIssues.map(i => `- ${i}`).join('\n') || '(none)'}
|
||||
|
||||
## Changes Made
|
||||
${sessionAnalysis.changesMade.map(c => `- ${c}`).join('\n') || '(none)'}
|
||||
|
||||
## Pending
|
||||
${sessionAnalysis.pending.length > 0
|
||||
? sessionAnalysis.pending.map(p => `- ${p}`).join('\n')
|
||||
: '(none)'}
|
||||
|
||||
## Notes
|
||||
${sessionAnalysis.notes || '(none)'}`
|
||||
```
|
||||
|
||||
### Step 3: Import to Core Memory via MCP
|
||||
|
||||
Use the MCP `core_memory` tool to save the structured text:
|
||||
|
||||
```javascript
|
||||
mcp__ccw-tools__core_memory({
|
||||
operation: "import",
|
||||
text: structuredText
|
||||
})
|
||||
```
|
||||
|
||||
Or via CLI (pipe structured text to import):
|
||||
|
||||
```bash
|
||||
# Write structured text to temp file, then import
|
||||
echo "$structuredText" | ccw core-memory import
|
||||
|
||||
# Or from a file
|
||||
ccw core-memory import --file /path/to/session-memory.md
|
||||
```
|
||||
|
||||
**Response Format**:
|
||||
```json
|
||||
{
|
||||
"operation": "import",
|
||||
"id": "CMEM-YYYYMMDD-HHMMSS",
|
||||
"message": "Created memory: CMEM-YYYYMMDD-HHMMSS"
|
||||
}
|
||||
```
|
||||
|
||||
### Step 4: Report Recovery ID
|
||||
|
||||
After successful import, **clearly display the Recovery ID** to the user:
|
||||
|
||||
```
|
||||
╔════════════════════════════════════════════════════════════════════════════╗
|
||||
║ ✓ Session Memory Saved ║
|
||||
║ ║
|
||||
║ Recovery ID: CMEM-YYYYMMDD-HHMMSS ║
|
||||
║ ║
|
||||
║ To restore: "Please import memory <ID>" ║
|
||||
║ (MCP: core_memory export | CLI: ccw core-memory export --id <ID>) ║
|
||||
╚════════════════════════════════════════════════════════════════════════════╝
|
||||
```
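For reference, restoring in a later session is the inverse call through the same tool; a minimal sketch, assuming the `export` operation accepts the `id` returned by `import` (mirroring the CLI's `--id` flag shown above):

```javascript
// Restore a previously saved session memory (sketch; the `id` parameter is an assumption)
mcp__ccw-tools__core_memory({
  operation: "export",
  id: "CMEM-20240120-143022"  // hypothetical Recovery ID from an earlier import
})
```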

## 6. Quality Checklist

Before generating:

- [ ] Session ID captured if a workflow session is active (WFS-*)
- [ ] Project Root is an absolute path (e.g., D:\Claude_dms3)
- [ ] Objective clearly states the "North Star" goal
- [ ] Execution Plan: COMPLETE plan preserved VERBATIM (no summarization)
- [ ] Plan Source: clearly identified (workflow | todo | user-stated | inferred)
- [ ] Plan Details: ALL phases, tasks, file paths, dependencies, and status markers included
- [ ] All file paths are ABSOLUTE (not relative)
- [ ] Working Files: 3-8 modified files with roles
- [ ] Reference Files: key context files (CLAUDE.md, types, configs)
- [ ] Last Action captures the final state (success/failure)
- [ ] Decisions include reasoning, not just choices
- [ ] Known Issues: deferred bugs and edge cases recorded so they are not forgotten
- [ ] Notes preserve debugging hypotheses, if any

## 7. Path Resolution Rules

### Project Root Detection

1. Check the current working directory from the environment
2. Look for project markers: `.git/`, `package.json`, `.claude/`
3. Use the topmost directory containing these markers (see the sketch below)
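A minimal sketch of that marker walk, assuming Node.js (the `findProjectRoot` helper name is hypothetical):

```javascript
const fs = require('fs');
const path = require('path');

// Walk upward from startDir, remembering the highest directory that contains a marker
const findProjectRoot = (startDir) => {
  const markers = ['.git', 'package.json', '.claude'];
  let dir = path.resolve(startDir);
  let topmost = null;
  while (true) {
    if (markers.some(m => fs.existsSync(path.join(dir, m)))) topmost = dir;
    const parent = path.dirname(dir);
    if (parent === dir) break;  // reached the filesystem root
    dir = parent;
  }
  return topmost || path.resolve(startDir);
};
```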

### Absolute Path Conversion

```javascript
const path = require('path');

// Convert a relative path to an absolute one
const toAbsolutePath = (relativePath, projectRoot) => {
  if (path.isAbsolute(relativePath)) return relativePath;
  return path.join(projectRoot, relativePath);
};

// Example: "src/api/auth.ts" → "D:\Claude_dms3\src\api\auth.ts"
```

### Reference File Categories

| Category | Examples | Priority |
|----------|----------|----------|
| Project Config | `.claude/CLAUDE.md`, `package.json`, `tsconfig.json` | High |
| Type Definitions | `src/types/*.ts`, `*.d.ts` | High |
| Related Modules | Parent/sibling modules with shared interfaces | Medium |
| Test Files | Corresponding test files for modified code | Medium |
| Documentation | `README.md`, `ARCHITECTURE.md` | Low |

## 8. Plan Detection (Priority Order)

### Priority 1: Workflow Session (IMPL_PLAN.md)
```javascript
// Check for an active workflow session
const manifest = await mcp__ccw-tools__session_manager({
  operation: "list",
  location: "active"
});

if (manifest.sessions?.length > 0) {
  const session = manifest.sessions[0];
  const plan = await mcp__ccw-tools__session_manager({
    operation: "read",
    session_id: session.id,
    content_type: "plan"
  });
  sessionAnalysis.sessionId = session.id;
  sessionAnalysis.executionPlan.source = "workflow";
  sessionAnalysis.executionPlan.content = plan.content;
}
```

### Priority 2: TodoWrite (Current Session Todos)
```javascript
// Extract from the conversation - look for TodoWrite tool calls
// Preserve the COMPLETE todo list with all details
const todos = extractTodosFromConversation();
if (todos.length > 0) {
  sessionAnalysis.executionPlan.source = "todo";
  // Format todos with full context - preserve status markers
  sessionAnalysis.executionPlan.content = todos.map(t =>
    `- [${t.status === 'completed' ? 'x' : t.status === 'in_progress' ? '>' : ' '}] ${t.content}`
  ).join('\n');
}
```

### Priority 3: User-Stated Plan
```javascript
// Look for explicit plan statements in user messages:
// - "Here's my plan: 1. ... 2. ... 3. ..."
// - "I want to: first..., then..., finally..."
// - Numbered or bulleted lists describing steps
const userPlan = extractUserStatedPlan();
if (userPlan) {
  sessionAnalysis.executionPlan.source = "user-stated";
  sessionAnalysis.executionPlan.content = userPlan;
}
```

### Priority 4: Inferred Plan
```javascript
// If there is no explicit plan, infer one from:
// - The task description and breakdown discussion
// - The sequence of actions taken
// - Outstanding work mentioned
const inferredPlan = inferPlanFromDiscussion();
if (inferredPlan) {
  sessionAnalysis.executionPlan.source = "inferred";
  sessionAnalysis.executionPlan.content = inferredPlan;
}
```

## 9. Notes

- **Timing**: Execute at task completion or before a context switch
- **Frequency**: Once per independent task or milestone
- **Recovery**: A new session can immediately continue with full context
- **Knowledge Graph**: Entity relationships are auto-extracted for visualization
- **Absolute Paths**: Critical for cross-session recovery on different machines
.claude/commands/memory/docs-full-cli.md (new file, 471 lines)
@@ -0,0 +1,471 @@
---
name: docs-full-cli
description: Generate full project documentation using CLI execution (Layer 3→1) with batched agents (4 modules/agent) and gemini→qwen→codex fallback; fewer than 20 modules uses direct parallel execution
argument-hint: "[path] [--tool <gemini|qwen|codex>]"
---

# Full Documentation Generation - CLI Mode (/memory:docs-full-cli)

## Overview

Orchestrates project-wide documentation generation using CLI-based execution with batched agents and automatic tool fallback.

**Parameters**:
- `path`: Target directory (default: current directory)
- `--tool <gemini|qwen|codex>`: Primary tool (default: gemini)

**Execution Flow**: Discovery → Plan Presentation → Execution → Verification

## 3-Layer Architecture & Auto-Strategy Selection

### Layer Definition & Strategy Assignment

| Layer | Depth | Strategy | Purpose | Context Pattern |
|-------|-------|----------|---------|-----------------|
| **Layer 3** (Deepest) | ≥3 | `full` | Generate docs for all subdirectories with code | `@**/*` (all files) |
| **Layer 2** (Middle) | 1-2 | `single` | Current dir + child docs | `@*/API.md @*/README.md @*.{ts,tsx,js,...}` |
| **Layer 1** (Top) | 0 | `single` | Current dir + child docs | `@*/API.md @*/README.md @*.{ts,tsx,js,...}` |

**Generation Direction**: Layer 3 → Layer 2 → Layer 1 (bottom-up dependency flow)

**Strategy Auto-Selection**: Strategies are determined automatically by directory depth - no user configuration needed.

### Strategy Details

#### Full Strategy (Layer 3 Only)
- **Use Case**: Deepest directories with comprehensive file coverage
- **Behavior**: Generates API.md + README.md for the current directory AND subdirectories containing code
- **Context**: All files in the current directory tree (`@**/*`)
- **Output**: `.workflow/docs/{project_name}/{path}/API.md` + `README.md`

#### Single Strategy (Layers 1-2)
- **Use Case**: Upper layers that aggregate from existing documentation
- **Behavior**: Generates API.md + README.md only in the current directory
- **Context**: Direct children's docs + current directory code files
- **Output**: `.workflow/docs/{project_name}/{path}/API.md` + `README.md`

### Example Flow
```
src/auth/handlers/ (depth 3) → FULL STRATEGY
  CONTEXT: @**/* (all files in handlers/ and subdirs)
  GENERATES: .workflow/docs/project/src/auth/handlers/{API.md,README.md} + subdirs
  ↓
src/auth/ (depth 2) → SINGLE STRATEGY
  CONTEXT: @*/API.md @*/README.md @*.ts (handlers docs + current code)
  GENERATES: .workflow/docs/project/src/auth/{API.md,README.md} only
  ↓
src/ (depth 1) → SINGLE STRATEGY
  CONTEXT: @*/API.md @*/README.md (auth docs, utils docs)
  GENERATES: .workflow/docs/project/src/{API.md,README.md} only
  ↓
./ (depth 0) → SINGLE STRATEGY
  CONTEXT: @*/API.md @*/README.md (src docs, tests docs)
  GENERATES: .workflow/docs/project/{API.md,README.md} only
```

## Core Execution Rules

1. **Analyze First**: Module discovery + folder classification before generation
2. **Wait for Approval**: Present the plan; no execution without user confirmation
3. **Execution Strategy**:
   - **<20 modules**: Direct parallel execution (max 4 concurrent per layer)
   - **≥20 modules**: Agent batch processing (4 modules/agent, 73% overhead reduction)
4. **Tool Fallback**: Auto-retry with fallback tools on failure
5. **Layer Sequential**: Process layers 3→2→1 (bottom-up), parallel batches within each layer
6. **Safety Check**: Verify that only docs files under .workflow/docs/ were modified
7. **Layer-based Grouping**: Group modules by LAYER (not raw depth) for execution

## Tool Fallback Hierarchy

```javascript
--tool gemini → [gemini, qwen, codex]  // default
--tool qwen   → [qwen, gemini, codex]
--tool codex  → [codex, gemini, qwen]
```
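The fallback order can be derived mechanically from the primary tool; a minimal sketch of the `construct_tool_order` helper referenced in Phase 3 (its body is an assumption, but the outputs match the table above):

```javascript
// Build the fallback order: primary tool first, the rest in default order
function construct_tool_order(primary_tool = "gemini") {
  const all = ["gemini", "qwen", "codex"];
  return [primary_tool, ...all.filter(t => t !== primary_tool)];
}
// construct_tool_order("codex") → ["codex", "gemini", "qwen"]
```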

**Trigger**: Non-zero exit code from the generation script

| Tool   | Best For                     | Fallback To    |
|--------|------------------------------|----------------|
| gemini | Documentation, patterns      | qwen → codex   |
| qwen   | Architecture, system design  | gemini → codex |
| codex  | Implementation, code quality | gemini → qwen  |

## Execution Phases

### Phase 1: Discovery & Analysis

```javascript
// Get project metadata
Bash({command: "pwd && basename \"$(pwd)\" && git rev-parse --show-toplevel 2>/dev/null || pwd", run_in_background: false});

// Get the module structure with classification
Bash({command: "ccw tool exec get_modules_by_depth '{\"format\":\"list\"}' | ccw tool exec classify_folders '{}'", run_in_background: false});

// OR with a path parameter
Bash({command: "cd <target-path> && ccw tool exec get_modules_by_depth '{\"format\":\"list\"}' | ccw tool exec classify_folders '{}'", run_in_background: false});
```

**Parse output** `depth:N|path:<PATH>|type:<code|navigation>|...` to extract module paths, types, and counts.
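A minimal sketch of parsing those pipe-delimited records (the field set beyond `depth`, `path`, and `type` is an assumption):

```javascript
// Parse lines like "depth:2|path:./src/auth|type:code|files:12" (sketch)
function parseModuleLine(line) {
  const record = {};
  for (const field of line.trim().split("|")) {
    const idx = field.indexOf(":");
    if (idx > 0) record[field.slice(0, idx)] = field.slice(idx + 1);
  }
  return { depth: Number(record.depth), path: record.path, type: record.type };
}
```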

**Smart filter**: Auto-detect and skip tests/build/config/vendor directories based on the project's tech stack.

### Phase 2: Plan Presentation

**For <20 modules**:
```
Documentation Generation Plan:
  Tool: gemini (fallback: qwen → codex)
  Total: 7 modules
  Execution: Direct parallel (< 20 modules threshold)
  Project: myproject
  Output: .workflow/docs/myproject/

Will generate docs for:
  - ./core/interfaces (12 files, type: code) - depth 2 [Layer 2] - single strategy
  - ./core (22 files, type: code) - depth 1 [Layer 2] - single strategy
  - ./models (9 files, type: code) - depth 1 [Layer 2] - single strategy
  - ./utils (12 files, type: navigation) - depth 1 [Layer 2] - single strategy
  - . (5 files, type: code) - depth 0 [Layer 1] - single strategy

Documentation Strategy (Auto-Selected):
  - Layer 2 (depth 1-2): API.md + README.md (current dir only, reference child docs)
  - Layer 1 (depth 0): API.md + README.md (current dir only, reference child docs)

Output Structure:
  - Code folders: API.md + README.md
  - Navigation folders: README.md only

Auto-skipped: ./tests, __pycache__, node_modules (15 paths)
Execution order: Layer 2 → Layer 1
Estimated time: ~5-10 minutes

Confirm execution? (y/n)
```

**For ≥20 modules**:
```
Documentation Generation Plan:
  Tool: gemini (fallback: qwen → codex)
  Total: 31 modules
  Execution: Agent batch processing (4 modules/agent)
  Project: myproject
  Output: .workflow/docs/myproject/

Will generate docs for:
  - ./src/features/auth (12 files, type: code) - depth 3 [Layer 3] - full strategy
  - ./.claude/commands/cli (6 files, type: code) - depth 3 [Layer 3] - full strategy
  - ./src/utils (8 files, type: code) - depth 2 [Layer 2] - single strategy
  ...

Documentation Strategy (Auto-Selected):
  - Layer 3 (depth ≥3): API.md + README.md (all subdirs with code)
  - Layer 2 (depth 1-2): API.md + README.md (current dir only)
  - Layer 1 (depth 0): API.md + README.md (current dir only)

Output Structure:
  - Code folders: API.md + README.md
  - Navigation folders: README.md only

Auto-skipped: ./tests, __pycache__, node_modules (15 paths)
Execution order: Layer 3 → Layer 2 → Layer 1

Agent allocation (by LAYER):
  - Layer 3 (14 modules, depth ≥3): 4 agents [4, 4, 4, 2]
  - Layer 2 (15 modules, depth 1-2): 4 agents [4, 4, 4, 3]
  - Layer 1 (2 modules, depth 0): 1 agent [2]

Estimated time: ~15-25 minutes

Confirm execution? (y/n)
```

### Phase 3A: Direct Execution (<20 modules)

**Strategy**: Parallel execution within each layer (max 4 concurrent), no agent overhead.

**CRITICAL**: All Bash commands use `run_in_background: false` for synchronous execution.

```javascript
let modules_by_layer = group_by_layer(module_list);  // see the sketch after Phase 3B
let tool_order = construct_tool_order(primary_tool);
let project_name = detect_project_name();

for (let layer of [3, 2, 1]) {
  if (modules_by_layer[layer].length === 0) continue;
  let batches = batch_modules(modules_by_layer[layer], 4);

  for (let batch of batches) {
    let parallel_tasks = batch.map(module => {
      return async () => {
        let strategy = module.depth >= 3 ? "full" : "single";
        for (let tool of tool_order) {
          Bash({
            command: `cd ${module.path} && ccw tool exec generate_module_docs '{"strategy":"${strategy}","sourcePath":".","projectName":"${project_name}","tool":"${tool}"}'`,
            run_in_background: false
          });
          if (bash_result.exit_code === 0) {
            report(`✅ ${module.path} (Layer ${layer}) docs generated with ${tool}`);
            return true;
          }
        }
        report(`❌ FAILED: ${module.path} (Layer ${layer}) failed all tools`);
        return false;
      };
    });
    await Promise.all(parallel_tasks.map(task => task()));
  }
}
```

### Phase 3B: Agent Batch Execution (≥20 modules)

**Strategy**: Batch modules into groups of 4, spawning one memory-bridge agent per batch.

```javascript
// Group modules by LAYER and batch within each layer
let modules_by_layer = group_by_layer(module_list);
let tool_order = construct_tool_order(primary_tool);
let project_name = detect_project_name();

for (let layer of [3, 2, 1]) {
  if (modules_by_layer[layer].length === 0) continue;

  let batches = batch_modules(modules_by_layer[layer], 4);
  let worker_tasks = [];

  for (let batch of batches) {
    worker_tasks.push(
      Task(
        subagent_type="memory-bridge",
        description=`Generate docs for ${batch.length} modules in Layer ${layer}`,
        prompt=generate_batch_worker_prompt(batch, tool_order, layer, project_name)
      )
    );
  }

  await parallel_execute(worker_tasks);
}
```
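Both phases iterate `modules_by_layer`, built from the depth→layer mapping in the Layer Definition table; a minimal sketch of the `group_by_layer` helper (its body is an assumption):

```javascript
// Map each module's depth to a layer per the Layer Definition table (sketch)
function group_by_layer(modules) {
  const layers = { 1: [], 2: [], 3: [] };
  for (const m of modules) {
    const layer = m.depth >= 3 ? 3 : (m.depth >= 1 ? 2 : 1);
    layers[layer].push(m);
  }
  return layers;
}
```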

**Batch Worker Prompt Template**:
```
PURPOSE: Generate documentation for assigned modules with tool fallback

TASK: Generate API.md + README.md for assigned modules using the specified strategies.

PROJECT: {{project_name}}
OUTPUT: .workflow/docs/{{project_name}}/

MODULES:
{{module_path_1}} (strategy: {{strategy_1}}, type: {{folder_type_1}})
{{module_path_2}} (strategy: {{strategy_2}}, type: {{folder_type_2}})
...

TOOLS (try in order): {{tool_1}}, {{tool_2}}, {{tool_3}}

EXECUTION SCRIPT: ccw tool exec generate_module_docs
- Accepts strategy parameter: full | single
- Accepts folder type detection: code | navigation
- Tool execution via direct CLI commands (gemini/qwen/codex)
- Output path: .workflow/docs/{{project_name}}/{module_path}/

EXECUTION FLOW (for each module):
1. Tool fallback loop (exit on first success):
   for tool in {{tool_1}} {{tool_2}} {{tool_3}}; do
     Bash({
       command: `cd "{{module_path}}" && ccw tool exec generate_module_docs '{"strategy":"{{strategy}}","sourcePath":".","projectName":"{{project_name}}","tool":"${tool}"}'`,
       run_in_background: false
     })
     exit_code=$?

     if [ $exit_code -eq 0 ]; then
       report "✅ {{module_path}} docs generated with $tool"
       break
     else
       report "⚠️ {{module_path}} failed with $tool, trying next..."
       continue
     fi
   done

2. Handle complete failure (all tools failed):
   if [ $exit_code -ne 0 ]; then
     report "❌ FAILED: {{module_path}} - all tools exhausted"
     # Continue to the next module (do not abort the batch)
   fi

FOLDER TYPE HANDLING:
- code: Generate API.md + README.md
- navigation: Generate README.md only

FAILURE HANDLING:
- Module-level isolation: one module's failure does not affect others
- Exit code detection: a non-zero exit code triggers the next tool
- Exhaustion reporting: log modules where all tools failed
- Batch continuation: always process the remaining modules

REPORTING FORMAT:
Per-module status:
✅ path/to/module docs generated with {tool}
⚠️ path/to/module failed with {tool}, trying next...
❌ FAILED: path/to/module - all tools exhausted
```

### Phase 4: Project-Level Documentation

**After all module documentation is generated, create the project-level documentation files.**

```javascript
let project_name = detect_project_name();
let project_root = get_project_root();

// Step 1: Generate the project README
report("Generating project README.md...");
for (let tool of tool_order) {
  Bash({
    command: `cd ${project_root} && ccw tool exec generate_module_docs '{"strategy":"project-readme","sourcePath":".","projectName":"${project_name}","tool":"${tool}"}'`,
    run_in_background: false
  });
  if (bash_result.exit_code === 0) {
    report(`✅ Project README generated with ${tool}`);
    break;
  }
}

// Step 2: Generate Architecture & Examples
report("Generating ARCHITECTURE.md and EXAMPLES.md...");
for (let tool of tool_order) {
  Bash({
    command: `cd ${project_root} && ccw tool exec generate_module_docs '{"strategy":"project-architecture","sourcePath":".","projectName":"${project_name}","tool":"${tool}"}'`,
    run_in_background: false
  });
  if (bash_result.exit_code === 0) {
    report(`✅ Architecture docs generated with ${tool}`);
    break;
  }
}

// Step 3: Generate HTTP API documentation (if API routes are detected)
Bash({command: 'rg "router\\.|@Get|@Post" -g "*.{ts,js,py}" 2>/dev/null && echo "API_FOUND" || echo "NO_API"', run_in_background: false});
if (bash_result.stdout.includes("API_FOUND")) {
  report("Generating HTTP API documentation...");
  for (let tool of tool_order) {
    Bash({
      command: `cd ${project_root} && ccw tool exec generate_module_docs '{"strategy":"http-api","sourcePath":".","projectName":"${project_name}","tool":"${tool}"}'`,
      run_in_background: false
    });
    if (bash_result.exit_code === 0) {
      report(`✅ HTTP API docs generated with ${tool}`);
      break;
    }
  }
}
```

**Expected Output**:
```
Project-Level Documentation:
✅ README.md (project root overview)
✅ ARCHITECTURE.md (system design)
✅ EXAMPLES.md (usage examples)
✅ api/README.md (HTTP API reference) [optional]
```

### Phase 5: Verification

```javascript
// Count the documentation files created
Bash({command: 'find .workflow/docs -type f -name "*.md" 2>/dev/null | wc -l', run_in_background: false});

// Display the structure
Bash({command: 'tree -L 3 .workflow/docs/', run_in_background: false});
```

**Result Summary**:
```
Documentation Generation Summary:
Total: 31 | Success: 29 | Failed: 2
Tool usage: gemini: 25, qwen: 4, codex: 0
Failed: path1, path2

Generated documentation:
.workflow/docs/myproject/
├── src/
│   ├── auth/
│   │   ├── API.md
│   │   └── README.md
│   └── utils/
│       └── README.md
└── README.md
```

## Error Handling

- **Batch Worker**: Tool fallback per module, batch isolation, clear status reporting
- **Coordinator**: Invalid-path abort, user decline handling, verification with cleanup
- **Fallback Triggers**: Non-zero exit code, script timeout, unexpected output

## Output Structure

```
.workflow/docs/{project_name}/
├── src/                      # Mirrors source structure
│   ├── modules/
│   │   ├── README.md         # Navigation
│   │   ├── auth/
│   │   │   ├── API.md        # API signatures
│   │   │   ├── README.md     # Module docs
│   │   │   └── middleware/
│   │   │       ├── API.md
│   │   │       └── README.md
│   │   └── api/
│   │       ├── API.md
│   │       └── README.md
│   └── utils/
│       └── README.md
├── lib/
│   └── core/
│       ├── API.md
│       └── README.md
├── README.md                 # ✨ Project root overview (auto-generated)
├── ARCHITECTURE.md           # ✨ System design (auto-generated)
├── EXAMPLES.md               # ✨ Usage examples (auto-generated)
└── api/                      # ✨ Optional (auto-generated if HTTP API detected)
    └── README.md             # HTTP API reference
```

## Usage Examples

```bash
# Full project documentation generation
/memory:docs-full-cli

# Target a specific directory
/memory:docs-full-cli src/features/auth
/memory:docs-full-cli .claude

# Use a specific tool
/memory:docs-full-cli --tool qwen
/memory:docs-full-cli src --tool qwen
```

## Key Advantages

- **Efficiency**: 30 modules → 8 agents (73% reduction from sequential)
- **Resilience**: 3-tier tool fallback per module
- **Performance**: Parallel batches, no concurrency limits
- **Observability**: Per-module tool usage, batch-level metrics
- **Automation**: Zero configuration - strategy auto-selected by directory depth
- **Path Mirroring**: Clear 1:1 mapping between source and documentation structure

## Template Reference

Templates used from `~/.claude/workflows/cli-templates/prompts/documentation/`:
- `api.txt`: Code API documentation (Part A: Code API, Part B: HTTP API)
- `module-readme.txt`: Module purpose, usage, dependencies
- `folder-navigation.txt`: Navigation README for folders with subdirectories

## Related Commands

- `/memory:docs` - Agent-based documentation planning workflow
- `/memory:docs-related-cli` - Update docs for changed modules only
- `/workflow:execute` - Execute documentation tasks (when using agent mode)
.claude/commands/memory/docs-related-cli.md (new file, 386 lines)
@@ -0,0 +1,386 @@
---
name: docs-related-cli
description: Generate/update documentation for git-changed modules using CLI execution with batched agents (4 modules/agent) and gemini→qwen→codex fallback; fewer than 15 modules uses direct parallel execution
argument-hint: "[--tool <gemini|qwen|codex>]"
---

# Related Documentation Generation - CLI Mode (/memory:docs-related-cli)

## Overview

Orchestrates context-aware documentation generation/updates for changed modules using CLI-based execution with batched agents and automatic tool fallback (gemini→qwen→codex).

**Parameters**:
- `--tool <gemini|qwen|codex>`: Primary tool (default: gemini)

**Execution Flow**:
1. Change Detection → 2. Plan Presentation → 3. Batched Execution → 4. Verification

## Core Rules

1. **Detect Changes First**: Use git diff to identify affected modules
2. **Wait for Approval**: Present the plan; no execution without user confirmation
3. **Execution Strategy**:
   - **<15 modules**: Direct parallel execution (max 4 concurrent per depth, no agent overhead)
   - **≥15 modules**: Agent batch processing (4 modules/agent, 73% overhead reduction)
4. **Tool Fallback**: Auto-retry with fallback tools on failure
5. **Depth Sequential**: Process depths N→0, parallel batches within each depth (both modes)
6. **Related Mode**: Generate/update only changed modules and their parent contexts
7. **Single Strategy**: Always use the `single` strategy (incremental update)

## Tool Fallback Hierarchy

```javascript
--tool gemini → [gemini, qwen, codex]  // default
--tool qwen   → [qwen, gemini, codex]
--tool codex  → [codex, gemini, qwen]
```

**Trigger**: Non-zero exit code from the generation script

| Tool   | Best For                     | Fallback To    |
|--------|------------------------------|----------------|
| gemini | Documentation, patterns      | qwen → codex   |
| qwen   | Architecture, system design  | gemini → codex |
| codex  | Implementation, code quality | gemini → qwen  |

## Phase 1: Change Detection & Analysis

```javascript
// Get project metadata
Bash({command: "pwd && basename \"$(pwd)\" && git rev-parse --show-toplevel 2>/dev/null || pwd", run_in_background: false});

// Detect changed modules
Bash({command: "ccw tool exec detect_changed_modules '{\"format\":\"list\"}'", run_in_background: false});

// Cache git changes
Bash({command: "git add -A 2>/dev/null || true", run_in_background: false});
```

**Parse output** `depth:N|path:<PATH>|change:<TYPE>|type:<code|navigation>` to extract the affected modules.

**Smart filter**: Auto-detect and skip tests/build/config/vendor directories based on the project's tech stack (Node.js/Python/Go/Rust/etc).

**Fallback**: If no changes are detected, use recent modules (first 10 by depth).

## Phase 2: Plan Presentation

**Present the filtered plan**:
```
Related Documentation Generation Plan:
  Tool: gemini (fallback: qwen → codex)
  Changed: 4 modules | Batching: 4 modules/agent
  Project: myproject
  Output: .workflow/docs/myproject/

Will generate/update docs for:
  - ./src/api/auth (5 files, type: code) [new module]
  - ./src/api (12 files, type: code) [parent of changed auth/]
  - ./src (8 files, type: code) [parent context]
  - . (14 files, type: code) [root level]

Documentation Strategy:
  - Strategy: single (all modules - incremental update)
  - Output: API.md + README.md (code folders), README.md only (navigation folders)
  - Context: Current dir code + child docs

Auto-skipped (12 paths):
  - Tests: ./src/api/auth.test.ts (8 paths)
  - Config: tsconfig.json (3 paths)
  - Other: node_modules (1 path)

Agent allocation:
  - Depth 3 (1 module): 1 agent [1]
  - Depth 2 (1 module): 1 agent [1]
  - Depth 1 (1 module): 1 agent [1]
  - Depth 0 (1 module): 1 agent [1]

Estimated time: ~5-10 minutes

Confirm execution? (y/n)
```

**Decision logic**:
- User confirms "y": Proceed with execution
- User declines "n": Abort, no changes
- <15 modules: Direct execution
- ≥15 modules: Agent batch execution

## Phase 3A: Direct Execution (<15 modules)

**Strategy**: Parallel execution within each depth (max 4 concurrent), no agent overhead.

**CRITICAL**: All Bash commands use `run_in_background: false` for synchronous execution.

```javascript
let modules_by_depth = group_by_depth(changed_modules);  // see the sketch after Phase 3B
let tool_order = construct_tool_order(primary_tool);
let project_name = detect_project_name();

for (let depth of sorted_depths.reverse()) {  // N → 0
  let batches = batch_modules(modules_by_depth[depth], 4);

  for (let batch of batches) {
    let parallel_tasks = batch.map(module => {
      return async () => {
        for (let tool of tool_order) {
          Bash({
            command: `cd ${module.path} && ccw tool exec generate_module_docs '{"strategy":"single","sourcePath":".","projectName":"${project_name}","tool":"${tool}"}'`,
            run_in_background: false
          });
          if (bash_result.exit_code === 0) {
            report(`✅ ${module.path} docs generated with ${tool}`);
            return true;
          }
        }
        report(`❌ FAILED: ${module.path} failed all tools`);
        return false;
      };
    });
    await Promise.all(parallel_tasks.map(task => task()));
  }
}
```

## Phase 3B: Agent Batch Execution (≥15 modules)

### Batching Strategy

```javascript
// Batch modules into groups of 4
function batch_modules(modules, batch_size = 4) {
  let batches = [];
  for (let i = 0; i < modules.length; i += batch_size) {
    batches.push(modules.slice(i, i + batch_size));
  }
  return batches;
}
// Examples: 10→[4,4,2] | 8→[4,4] | 3→[3]
```

### Coordinator Orchestration

```javascript
let modules_by_depth = group_by_depth(changed_modules);
let tool_order = construct_tool_order(primary_tool);
let project_name = detect_project_name();

for (let depth of sorted_depths.reverse()) {  // N → 0
  let batches = batch_modules(modules_by_depth[depth], 4);
  let worker_tasks = [];

  for (let batch of batches) {
    worker_tasks.push(
      Task(
        subagent_type="memory-bridge",
        description=`Generate docs for ${batch.length} modules at depth ${depth}`,
        prompt=generate_batch_worker_prompt(batch, tool_order, depth, project_name, "related")
      )
    );
  }

  await parallel_execute(worker_tasks);  // Batches run in parallel
}
```
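`group_by_depth` and `sorted_depths` are assumed helpers; a minimal sketch of both:

```javascript
// Bucket modules by their depth field (sketch; helper bodies are assumptions)
function group_by_depth(modules) {
  const by_depth = {};
  for (const m of modules) (by_depth[m.depth] ??= []).push(m);
  return by_depth;
}

// Ascending depth keys; the loops above call .reverse() to walk N → 0
function sorted_depths_of(by_depth) {
  return Object.keys(by_depth).map(Number).sort((a, b) => a - b);
}
```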

### Batch Worker Prompt Template

```
PURPOSE: Generate/update documentation for assigned modules with tool fallback (related mode)

TASK:
Generate documentation for the following modules based on recent changes. For each module, try the tools in order until one succeeds.

PROJECT: {{project_name}}
OUTPUT: .workflow/docs/{{project_name}}/

MODULES:
{{module_path_1}} (type: {{folder_type_1}})
{{module_path_2}} (type: {{folder_type_2}})
{{module_path_3}} (type: {{folder_type_3}})
{{module_path_4}} (type: {{folder_type_4}})

TOOLS (try in order):
1. {{tool_1}}
2. {{tool_2}}
3. {{tool_3}}

EXECUTION:
For each module above:
1. Try tool 1:
   Bash({
     command: `cd "{{module_path}}" && ccw tool exec generate_module_docs '{"strategy":"single","sourcePath":".","projectName":"{{project_name}}","tool":"{{tool_1}}"}'`,
     run_in_background: false
   })
   → Success: Report "✅ {{module_path}} docs generated with {{tool_1}}", proceed to the next module
   → Failure: Try tool 2
2. Try tool 2:
   Bash({
     command: `cd "{{module_path}}" && ccw tool exec generate_module_docs '{"strategy":"single","sourcePath":".","projectName":"{{project_name}}","tool":"{{tool_2}}"}'`,
     run_in_background: false
   })
   → Success: Report "✅ {{module_path}} docs generated with {{tool_2}}", proceed to the next module
   → Failure: Try tool 3
3. Try tool 3:
   Bash({
     command: `cd "{{module_path}}" && ccw tool exec generate_module_docs '{"strategy":"single","sourcePath":".","projectName":"{{project_name}}","tool":"{{tool_3}}"}'`,
     run_in_background: false
   })
   → Success: Report "✅ {{module_path}} docs generated with {{tool_3}}", proceed to the next module
   → Failure: Report "❌ FAILED: {{module_path}} failed all tools", proceed to the next module

FOLDER TYPE HANDLING:
- code: Generate API.md + README.md
- navigation: Generate README.md only

REPORTING:
Report a final summary with:
- Total processed: X modules
- Successful: Y modules
- Failed: Z modules
- Tool usage: {{tool_1}}:X, {{tool_2}}:Y, {{tool_3}}:Z
```

## Phase 4: Verification

```javascript
// Count documentation files created/updated
Bash({command: 'find .workflow/docs -type f -name "*.md" 2>/dev/null | wc -l', run_in_background: false});

// Display recent changes
Bash({command: 'find .workflow/docs -type f -name "*.md" -mmin -60 2>/dev/null', run_in_background: false});
```

**Aggregate results**:
```
Documentation Generation Summary:
Total: 4 | Success: 4 | Failed: 0

Tool usage:
- gemini: 4 modules
- qwen: 0 modules (fallback)
- codex: 0 modules

Changes:
.workflow/docs/myproject/src/api/auth/API.md (new)
.workflow/docs/myproject/src/api/auth/README.md (new)
.workflow/docs/myproject/src/api/API.md (updated)
.workflow/docs/myproject/src/api/README.md (updated)
.workflow/docs/myproject/src/API.md (updated)
.workflow/docs/myproject/src/README.md (updated)
.workflow/docs/myproject/API.md (updated)
.workflow/docs/myproject/README.md (updated)
```

## Execution Summary

**Module Count Threshold**:
- **<15 modules**: Coordinator executes Phase 3A (Direct Execution)
- **≥15 modules**: Coordinator executes Phase 3B (Agent Batch Execution)

**Agent Hierarchy** (for ≥15 modules):
- **Coordinator**: Handles batch division, spawns worker agents per depth
- **Worker Agents**: Each processes 4 modules with tool fallback (related mode)

## Error Handling

**Batch Worker**:
- Tool fallback per module (auto-retry)
- Batch isolation (failures don't propagate)
- Clear per-module status reporting

**Coordinator**:
- No changes: Use the fallback (10 most recent modules)
- User decline: No execution
- Verification failure: Report incomplete modules
- Partial failures: Continue execution, report failed modules

**Fallback Triggers**:
- Non-zero exit code
- Script timeout
- Unexpected output

## Output Structure

```
.workflow/docs/{project_name}/
├── src/                      # Mirrors source structure
│   ├── modules/
│   │   ├── README.md
│   │   ├── auth/
│   │   │   ├── API.md        # Updated based on code changes
│   │   │   └── README.md     # Updated based on code changes
│   │   └── api/
│   │       ├── API.md
│   │       └── README.md
│   └── utils/
│       └── README.md
└── README.md
```

## Usage Examples

```bash
# Daily development documentation update
/memory:docs-related-cli

# After feature work, with a specific tool
/memory:docs-related-cli --tool qwen

# Code quality documentation review after implementation
/memory:docs-related-cli --tool codex
```

## Key Advantages

- **Efficiency**: 30 modules → 8 agents (73% reduction)
- **Resilience**: 3-tier fallback per module
- **Performance**: Parallel batches, no concurrency limits
- **Context-aware**: Updates based on actual git changes
- **Fast**: Only affected modules, not the entire project
- **Incremental**: Single strategy for focused updates

## Coordinator Checklist

- Parse `--tool` (default: gemini)
- Get project metadata (name, root)
- Detect changed modules via `ccw tool exec detect_changed_modules`
- **Smart filter modules** (auto-detect tech stack, skip tests/build/config/vendor)
- Cache git changes
- Apply the fallback if there are no changes (10 most recent modules)
- Construct the tool fallback order
- **Present the filtered plan** with skip reasons and change types
- **Wait for y/n confirmation**
- Determine the execution mode:
  - **<15 modules**: Direct execution (Phase 3A)
    - For each depth (N→0): parallel module updates (max 4 concurrent) with tool fallback
  - **≥15 modules**: Agent batch execution (Phase 3B)
    - For each depth (N→0): batch modules (4 per batch), spawn batch workers in parallel
    - Wait for depth/batch completion
    - Aggregate results
- Verification check (documentation files created/updated)
- Display the summary + recent changes

## Comparison with Full Documentation Generation

| Aspect | Related Generation | Full Generation |
|--------|--------------------|-----------------|
| **Scope** | Changed modules only | All project modules |
| **Speed** | Fast (minutes) | Slower (10-30 min) |
| **Use case** | Daily development | Initial setup, major refactoring |
| **Strategy** | `single` (all) | `full` (L3) + `single` (L1-2) |
| **Trigger** | After commits | After setup or major changes |
| **Batching** | 4 modules/agent | 4 modules/agent |
| **Fallback** | gemini→qwen→codex | gemini→qwen→codex |
| **Direct-execution threshold** | <15 modules | <20 modules |

## Template Reference

Templates used from `~/.claude/workflows/cli-templates/prompts/documentation/`:
- `api.txt`: Code API documentation
- `module-readme.txt`: Module purpose, usage, dependencies
- `folder-navigation.txt`: Navigation README for folders

## Related Commands

- `/memory:docs-full-cli` - Full project documentation generation
- `/memory:docs` - Agent-based documentation planning workflow
- `/memory:update-related` - Update CLAUDE.md for changed modules

@@ -1,609 +0,0 @@
---
name: docs
description: Plan the documentation workflow with dynamic grouping (≤10 docs/task); generates IMPL tasks for parallel module trees, README, ARCHITECTURE, and HTTP API docs
argument-hint: "[path] [--tool <gemini|qwen|codex>] [--mode <full|partial>] [--cli-execute]"
---

# Documentation Workflow (/memory:docs)

## Overview
Lightweight planner that analyzes project structure, decomposes documentation work into tasks, and generates execution plans. It does NOT generate documentation content itself - it delegates to the doc-generator agent.

**Execution Strategy**:
- **Dynamic Task Grouping**: Level 1 tasks are grouped by top-level directories with a document count limit
  - **Primary constraint**: Each task generates ≤10 documents (API.md + README.md both count)
  - **Optimization goal**: Prefer grouping 2 top-level directories per task for context sharing
  - **Conflict resolution**: If 2 dirs exceed 10 docs, reduce to 1 dir/task; if 1 dir exceeds 10 docs, split by subdirectories
  - **Context benefit**: Same-task directories are analyzed together via a single Gemini call
- **Parallel Execution**: Multiple Level 1 tasks execute concurrently for faster completion
- **Pre-computed Analysis**: Phase 2 performs the unified analysis once, stored in `.process/` for reuse
- **Efficient Data Loading**: All existing docs are loaded once in Phase 2 and shared across tasks

**Path Mirroring**: Documentation structure mirrors source code under `.workflow/docs/{project_name}/`
- Example: `my_app/src/core/` → `.workflow/docs/my_app/src/core/API.md`

**Two Execution Modes**:
- **Default (Agent Mode)**: CLI analyzes in `pre_analysis` (MODE=analysis), agent writes docs
- **--cli-execute (CLI Mode)**: CLI generates docs in `implementation_approach` (MODE=write), agent executes CLI commands

## Path Mirroring Strategy

**Principle**: Documentation structure **mirrors** source code structure under a project-specific directory.

| Source Path | Project Name | Documentation Path |
|-------------|--------------|--------------------|
| `my_app/src/core/` | `my_app` | `.workflow/docs/my_app/src/core/API.md` |
| `my_app/src/modules/auth/` | `my_app` | `.workflow/docs/my_app/src/modules/auth/API.md` |
| `another_project/lib/utils/` | `another_project` | `.workflow/docs/another_project/lib/utils/API.md` |

**Benefits**: Easy to locate documentation, maintains logical organization, clear 1:1 mapping, supports any project structure.
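The mapping itself is a pure path join; a minimal sketch, assuming the source path already starts with the project directory:

```javascript
const path = require('path');

// "my_app/src/core" → ".workflow/docs/my_app/src/core/API.md" (sketch; helper name is hypothetical)
const docPathFor = (sourcePath, docName = "API.md") =>
  path.posix.join(".workflow/docs", sourcePath, docName);
```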

## Parameters

```bash
/memory:docs [path] [--tool <gemini|qwen|codex>] [--mode <full|partial>] [--cli-execute]
```

- **path**: Target directory (default: current directory)
- **--mode**: Documentation generation mode (default: full)
  - `full`: Complete documentation (modules + README + ARCHITECTURE + EXAMPLES + HTTP API)
  - `partial`: Module documentation only (API.md + README.md)
- **--tool**: CLI tool selection (default: gemini)
  - `gemini`: Comprehensive documentation, pattern recognition
  - `qwen`: Architecture analysis, system design focus
  - `codex`: Implementation validation, code quality
- **--cli-execute**: Enable CLI-based documentation generation (optional)

## Planning Workflow

### Phase 1: Initialize Session

```bash
# Get the target path, project name, and root
bash(pwd && basename "$(pwd)" && git rev-parse --show-toplevel 2>/dev/null || pwd && date +%Y%m%d-%H%M%S)

# Create session directories (replace the timestamp)
bash(mkdir -p .workflow/WFS-docs-{timestamp}/.{task,process,summaries} && touch .workflow/.active-WFS-docs-{timestamp})

# Create workflow-session.json (replace the values)
bash(echo '{"session_id":"WFS-docs-{timestamp}","project":"{project} documentation","status":"planning","timestamp":"2024-01-20T14:30:22+08:00","path":".","target_path":"{target_path}","project_root":"{project_root}","project_name":"{project_name}","mode":"full","tool":"gemini","cli_execute":false}' | jq '.' > .workflow/WFS-docs-{timestamp}/workflow-session.json)
```

### Phase 2: Analyze Structure

**Smart filter**: Auto-detect and skip tests/build/config/vendor directories based on the project's tech stack.

**Commands** (collect data with simple bash):

```bash
# 1. Run the folder analysis
bash(~/.claude/scripts/get_modules_by_depth.sh | ~/.claude/scripts/classify-folders.sh)

# 2. Get top-level directories (first 2 path levels)
bash(~/.claude/scripts/get_modules_by_depth.sh | ~/.claude/scripts/classify-folders.sh | awk -F'|' '{print $1}' | sed 's|^\./||' | awk -F'/' '{if(NF>=2) print $1"/"$2; else if(NF==1) print $1}' | sort -u)

# 3. Find existing docs (if the directory exists)
bash(if [ -d .workflow/docs/\${project_name} ]; then find .workflow/docs/\${project_name} -type f -name "*.md" ! -path "*/README.md" ! -path "*/ARCHITECTURE.md" ! -path "*/EXAMPLES.md" ! -path "*/api/*" 2>/dev/null; fi)

# 4. Read existing docs content (if files exist)
bash(if [ -d .workflow/docs/\${project_name} ]; then find .workflow/docs/\${project_name} -type f -name "*.md" ! -path "*/README.md" ! -path "*/ARCHITECTURE.md" ! -path "*/EXAMPLES.md" ! -path "*/api/*" 2>/dev/null | xargs cat 2>/dev/null; fi)
```

**Data Processing**: Parse the bash outputs, calculate statistics, then use the **Write tool** to create `${session_dir}/.process/phase2-analysis.json` with this structure:

```json
{
  "metadata": {
    "generated_at": "2025-11-03T16:57:30.469669",
    "project_name": "project_name",
    "project_root": "/path/to/project"
  },
  "folder_analysis": [
    {"path": "./src/core", "type": "code", "code_count": 5, "dirs_count": 2}
  ],
  "top_level_dirs": ["src/modules", "lib/core"],
  "existing_docs": {
    "file_list": [".workflow/docs/project/src/core/API.md"],
    "content": "... existing docs content ..."
  },
  "unified_analysis": [],
  "statistics": {
    "total": 15,
    "code": 8,
    "navigation": 7,
    "top_level": 3
  }
}
```

**Then** use the **Edit tool** to update `workflow-session.json`, adding the analysis field.

**Output**: A single `phase2-analysis.json` with all analysis data (no temp files or Python scripts).

**Auto-skipped**: Tests (`**/test/**`, `**/*.test.*`), Build (`**/node_modules/**`, `**/dist/**`), Config (root-level files), Vendor directories.

### Phase 3: Detect Update Mode

**Commands**:

```bash
# Count existing docs from phase2-analysis.json
bash(cat .workflow/WFS-docs-{timestamp}/.process/phase2-analysis.json | jq '.existing_docs.file_list | length')
```

**Data Processing**: Use the count result, then use the **Edit tool** to update `workflow-session.json`:
- Add `"update_mode": "update"` if count > 0, else `"create"`
- Add `"existing_docs": <count>`

### Phase 4: Decompose Tasks

**Task Hierarchy** (dynamic, based on document count):

```
Small Projects (total ≤10 docs):
  Level 1: IMPL-001 (all directories in a single task, shared context)
  Level 2: IMPL-002 (README, full mode only)
  Level 3: IMPL-003 (ARCHITECTURE+EXAMPLES), IMPL-004 (HTTP API, optional)

Medium Projects (example: 7 top-level dirs, 18 total docs):
  Step 1: Count docs per top-level dir
  ├─ dir1: 3 docs, dir2: 4 docs → Group 1 (7 docs)
  ├─ dir3: 5 docs, dir4: 3 docs → Group 2 (8 docs)
  ├─ dir5: 2 docs → Group 3 (2 docs, can add more)

  Step 2: Create tasks with the ≤10 docs constraint
  Level 1: IMPL-001 to IMPL-003 (parallel groups)
  ├─ IMPL-001: Group 1 (dir1 + dir2, 7 docs, shared context)
  ├─ IMPL-002: Group 2 (dir3 + dir4, 8 docs, shared context)
  └─ IMPL-003: Group 3 (remaining dirs, ≤10 docs)
  Level 2: IMPL-004 (README, depends on Level 1, full mode only)
  Level 3: IMPL-005 (ARCHITECTURE+EXAMPLES), IMPL-006 (HTTP API, optional)

Large Projects (single dir >10 docs):
  Step 1: Detect the oversized directory
  └─ src/modules/: 15 subdirs → 30 docs (exceeds the limit)

  Step 2: Split by subdirectories
  Level 1: IMPL-001 to IMPL-003 (split the oversized dir)
  ├─ IMPL-001: src/modules/ subdirs 1-5 (10 docs)
  ├─ IMPL-002: src/modules/ subdirs 6-10 (10 docs)
  └─ IMPL-003: src/modules/ subdirs 11-15 (10 docs)
```

**Grouping Algorithm** (a sketch follows this list):
1. Count the total docs for each top-level directory
2. Try grouping 2 directories (optimization for context sharing)
3. If a group exceeds 10 docs, split to 1 dir/task
4. If a single dir exceeds 10 docs, split by subdirectories
5. Create parallel Level 1 tasks with ≤10 docs each
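A minimal sketch of that grouping pass; doc counts per folder follow the 2-per-code-folder / 1-per-navigation-folder rule stated in the data processing below, and the helper name is hypothetical:

```javascript
// Greedy pairing of top-level dirs under a ≤10-docs-per-task constraint (sketch)
function groupDirectories(dirs, maxDocs = 10) {
  const groups = [];
  const queue = [...dirs];  // each entry: { path, docCount }
  while (queue.length > 0) {
    const first = queue.shift();
    if (first.docCount > maxDocs) {
      // Oversized dir: the real flow would split this one by subdirectories
      groups.push({ directories: [first.path], doc_count: first.docCount, split: true });
      continue;
    }
    // Prefer pairing with another dir when the combined count stays within the limit
    const partner = queue.findIndex(d => first.docCount + d.docCount <= maxDocs);
    if (partner >= 0) {
      const second = queue.splice(partner, 1)[0];
      groups.push({ directories: [first.path, second.path], doc_count: first.docCount + second.docCount });
    } else {
      groups.push({ directories: [first.path], doc_count: first.docCount });
    }
  }
  return groups;
}
```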
|
||||
|
||||
**Benefits**: Parallel execution, failure isolation, progress visibility, context sharing, document count control.
|
||||
|
||||
**Commands**:
|
||||
|
||||
```bash
|
||||
# 1. Get top-level directories from phase2-analysis.json
|
||||
bash(cat .workflow/WFS-docs-{timestamp}/.process/phase2-analysis.json | jq -r '.top_level_dirs[]')
|
||||
|
||||
# 2. Get mode from workflow-session.json
|
||||
bash(cat .workflow/WFS-docs-{timestamp}/workflow-session.json | jq -r '.mode // "full"')
|
||||
|
||||
# 3. Check for HTTP API
|
||||
bash(grep -r "router\.|@Get\|@Post" src/ 2>/dev/null && echo "API_FOUND" || echo "NO_API")
|
||||
```
|
||||
|
||||
**Data Processing**:
|
||||
1. Count documents for each top-level directory (from folder_analysis):
|
||||
- Code folders: 2 docs each (API.md + README.md)
|
||||
- Navigation folders: 1 doc each (README.md only)
|
||||
2. Apply grouping algorithm with ≤10 docs constraint:
|
||||
- Try grouping 2 directories, calculate total docs
|
||||
- If total ≤10 docs: create group
|
||||
- If total >10 docs: split to 1 dir/group or subdivide
|
||||
- If single dir >10 docs: split by subdirectories
|
||||
3. Use **Edit tool** to update `phase2-analysis.json` adding groups field:
|
||||
```json
|
||||
"groups": {
|
||||
"count": 3,
|
||||
"assignments": [
|
||||
{"group_id": "001", "directories": ["src/modules", "src/utils"], "doc_count": 5},
|
||||
{"group_id": "002", "directories": ["lib/core"], "doc_count": 6},
|
||||
{"group_id": "003", "directories": ["lib/helpers"], "doc_count": 3}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Task ID Calculation**:
|
||||
```bash
|
||||
group_count=$(jq '.groups.count' .workflow/WFS-docs-{timestamp}/.process/phase2-analysis.json)
|
||||
readme_id=$((group_count + 1)) # Next ID after groups
|
||||
arch_id=$((group_count + 2))
|
||||
api_id=$((group_count + 3))
|
||||
```
|
||||
|
||||
### Phase 5: Generate Task JSONs
|
||||
|
||||
**CLI Strategy**:
|
||||
|
||||
| Mode | cli_execute | Placement | CLI MODE | Approval Flag | Agent Role |
|
||||
|------|-------------|-----------|----------|---------------|------------|
|
||||
| **Agent** | false | pre_analysis | analysis | (none) | Generate docs in implementation_approach |
|
||||
| **CLI** | true | implementation_approach | write | --approval-mode yolo | Execute CLI commands, validate output |
|
||||
|
||||
**Command Patterns**:
|
||||
- Gemini/Qwen: `cd dir && gemini -p "..."`
|
||||
- CLI Mode: `cd dir && gemini --approval-mode yolo -p "..."`
|
||||
- Codex: `codex -C dir --full-auto exec "..." --skip-git-repo-check -s danger-full-access`
|
||||
|
||||
**Generation Process**:
|
||||
1. Read configuration values (tool, cli_execute, mode) from workflow-session.json
|
||||
2. Read group assignments from phase2-analysis.json
|
||||
3. Generate Level 1 tasks (IMPL-001 to IMPL-N, one per group)
|
||||
4. Generate Level 2+ tasks if mode=full (README, ARCHITECTURE, HTTP API)
|
||||
|
||||
## Task Templates

### Level 1: Module Trees Group Task (Unified)

**Execution Model**: Each task processes its assigned directory group (max 2 directories) using pre-analyzed data from Phase 2.

```json
{
  "id": "IMPL-${group_number}",
  "title": "Document Module Trees Group ${group_number}",
  "status": "pending",
  "meta": {
    "type": "docs-tree-group",
    "agent": "@doc-generator",
    "tool": "gemini",
    "cli_execute": false,
    "group_number": "${group_number}",
    "total_groups": "${total_groups}"
  },
  "context": {
    "requirements": [
      "Process directories from group ${group_number} in phase2-analysis.json",
      "Generate docs to .workflow/docs/${project_name}/ (mirrored structure)",
      "Code folders: API.md + README.md; Navigation folders: README.md only",
      "Use pre-analyzed data from Phase 2 (no redundant analysis)"
    ],
    "focus_paths": ["${group_dirs_from_json}"],
    "precomputed_data": {
      "phase2_analysis": "${session_dir}/.process/phase2-analysis.json"
    }
  },
  "flow_control": {
    "pre_analysis": [
      {
        "step": "load_precomputed_data",
        "action": "Load Phase 2 analysis and extract group directories",
        "commands": [
          "bash(cat ${session_dir}/.process/phase2-analysis.json)",
          "bash(jq '.groups.assignments[] | select(.group_id == \"${group_number}\") | .directories' ${session_dir}/.process/phase2-analysis.json)"
        ],
        "output_to": "phase2_context",
        "note": "Single JSON file contains all Phase 2 analysis results"
      }
    ],
    "implementation_approach": [
      {
        "step": 1,
        "title": "Generate documentation for assigned directory group",
        "description": "Process directories in Group ${group_number} using pre-analyzed data",
        "modification_points": [
          "Read group directories from [phase2_context].groups.assignments[${group_number}].directories",
          "For each directory: parse folder types from folder_analysis, parse structure from unified_analysis",
          "Map source_path to .workflow/docs/${project_name}/{path}",
          "Generate API.md for code folders, README.md for all folders",
          "Preserve user modifications from [phase2_context].existing_docs.content"
        ],
        "logic_flow": [
          "phase2 = parse([phase2_context])",
          "dirs = phase2.groups.assignments[${group_number}].directories",
          "for dir in dirs:",
          "  folder_info = find(dir, phase2.folder_analysis)",
          "  outline = find(dir, phase2.unified_analysis)",
          "  if folder_info.type == 'code': generate API.md + README.md",
          "  elif folder_info.type == 'navigation': generate README.md only",
          "  write to .workflow/docs/${project_name}/{dir}/"
        ],
        "depends_on": [],
        "output": "group_module_docs"
      }
    ],
    "target_files": [
      ".workflow/docs/${project_name}/*/API.md",
      ".workflow/docs/${project_name}/*/README.md"
    ]
  }
}
```
**CLI Execute Mode Note**: When `cli_execute=true`, add Step 2 in `implementation_approach`:

```json
{
  "step": 2,
  "title": "Batch generate documentation via CLI",
  "command": "bash(dirs=$(jq -r '.groups.assignments[] | select(.group_id == \"${group_number}\") | .directories[]' ${session_dir}/.process/phase2-analysis.json); for dir in $dirs; do cd \"$dir\" && gemini --approval-mode yolo -p \"PURPOSE: Generate module docs\\nTASK: Create documentation\\nMODE: write\\nCONTEXT: @**/* [phase2_context]\\nEXPECTED: API.md and README.md\\nRULES: Mirror structure\" || echo \"Failed: $dir\"; cd -; done)",
  "depends_on": [1],
  "output": "generated_docs"
}
```
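To sanity-check the directory expansion before letting the CLI write anything, a dry-run variant of the same loop (an illustrative sketch, not part of the template) might look like:

```bash
session_dir=".workflow/WFS-docs-{timestamp}"   # placeholder for the active session
dirs=$(jq -r '.groups.assignments[] | select(.group_id == "001") | .directories[]' \
  "$session_dir/.process/phase2-analysis.json")
for dir in $dirs; do
  echo "Would document: $dir"   # replace echo with the gemini invocation once verified
done
```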
### Level 2: Project README Task

**Task ID**: `IMPL-${readme_id}` (where `readme_id = group_count + 1`)
**Dependencies**: Depends on all Level 1 tasks completing.

```json
{
  "id": "IMPL-${readme_id}",
  "title": "Generate Project README",
  "status": "pending",
  "depends_on": ["IMPL-001", "...", "IMPL-${group_count}"],
  "meta": {"type": "docs", "agent": "@doc-generator", "tool": "gemini", "cli_execute": false},
  "flow_control": {
    "pre_analysis": [
      {
        "step": "load_existing_readme",
        "command": "bash(cat .workflow/docs/${project_name}/README.md 2>/dev/null || echo 'No existing README')",
        "output_to": "existing_readme"
      },
      {
        "step": "load_module_docs",
        "command": "bash(find .workflow/docs/${project_name} -type f -name '*.md' ! -path '.workflow/docs/${project_name}/README.md' ! -path '.workflow/docs/${project_name}/ARCHITECTURE.md' ! -path '.workflow/docs/${project_name}/EXAMPLES.md' ! -path '.workflow/docs/${project_name}/api/*' | xargs cat)",
        "output_to": "all_module_docs"
      },
      {
        "step": "analyze_project",
        "command": "bash(gemini \"PURPOSE: Analyze project structure\\nTASK: Extract overview from modules\\nMODE: analysis\\nCONTEXT: [all_module_docs]\\nEXPECTED: Project outline\")",
        "output_to": "project_outline"
      }
    ],
    "implementation_approach": [
      {
        "step": 1,
        "title": "Generate project README",
        "description": "Generate project README with navigation links while preserving user modifications",
        "modification_points": [
          "Parse [project_outline] and [all_module_docs]",
          "Generate README structure with navigation links",
          "Preserve [existing_readme] user modifications"
        ],
        "logic_flow": ["Parse data", "Generate README with navigation", "Preserve modifications"],
        "depends_on": [],
        "output": "project_readme"
      }
    ],
    "target_files": [".workflow/docs/${project_name}/README.md"]
  }
}
```
### Level 3: Architecture & Examples Documentation Task

**Task ID**: `IMPL-${arch_id}` (where `arch_id = group_count + 2`)
**Dependencies**: Depends on Level 2 (Project README).

```json
{
  "id": "IMPL-${arch_id}",
  "title": "Generate Architecture & Examples Documentation",
  "status": "pending",
  "depends_on": ["IMPL-${readme_id}"],
  "meta": {"type": "docs", "agent": "@doc-generator", "tool": "gemini", "cli_execute": false},
  "flow_control": {
    "pre_analysis": [
      {"step": "load_existing_docs", "command": "bash(cat .workflow/docs/${project_name}/{ARCHITECTURE,EXAMPLES}.md 2>/dev/null || echo 'No existing docs')", "output_to": "existing_arch_examples"},
      {"step": "load_all_docs", "command": "bash(cat .workflow/docs/${project_name}/README.md && find .workflow/docs/${project_name} -type f -name '*.md' ! -path '*/README.md' ! -path '*/ARCHITECTURE.md' ! -path '*/EXAMPLES.md' ! -path '*/api/*' | xargs cat)", "output_to": "all_docs"},
      {"step": "analyze_architecture", "command": "bash(gemini \"PURPOSE: Analyze system architecture\\nTASK: Synthesize architectural overview and examples\\nMODE: analysis\\nCONTEXT: [all_docs]\\nEXPECTED: Architecture + Examples outline\")", "output_to": "arch_examples_outline"}
    ],
    "implementation_approach": [
      {
        "step": 1,
        "title": "Generate architecture and examples documentation",
        "modification_points": [
          "Parse [arch_examples_outline] and [all_docs]",
          "Generate ARCHITECTURE.md (system design, patterns)",
          "Generate EXAMPLES.md (code snippets, usage)",
          "Preserve [existing_arch_examples] modifications"
        ],
        "depends_on": [],
        "output": "arch_examples_docs"
      }
    ],
    "target_files": [".workflow/docs/${project_name}/ARCHITECTURE.md", ".workflow/docs/${project_name}/EXAMPLES.md"]
  }
}
```
### Level 4: HTTP API Documentation Task (Optional)

**Task ID**: `IMPL-${api_id}` (where `api_id = group_count + 3`)
**Dependencies**: Depends on Level 3.

```json
{
  "id": "IMPL-${api_id}",
  "title": "Generate HTTP API Documentation",
  "status": "pending",
  "depends_on": ["IMPL-${arch_id}"],
  "meta": {"type": "docs", "agent": "@doc-generator", "tool": "gemini", "cli_execute": false},
  "flow_control": {
    "pre_analysis": [
      {"step": "discover_api", "command": "bash(rg 'router\\.| @(Get|Post)' -g '*.{ts,js}')", "output_to": "endpoint_discovery"},
      {"step": "load_existing_api", "command": "bash(cat .workflow/docs/${project_name}/api/README.md 2>/dev/null || echo 'No existing API docs')", "output_to": "existing_api_docs"},
      {"step": "analyze_api", "command": "bash(gemini \"PURPOSE: Document HTTP API\\nTASK: Analyze endpoints\\nMODE: analysis\\nCONTEXT: @src/api/**/* [endpoint_discovery]\\nEXPECTED: API outline\")", "output_to": "api_outline"}
    ],
    "implementation_approach": [
      {
        "step": 1,
        "title": "Generate HTTP API documentation",
        "modification_points": [
          "Parse [api_outline] and [endpoint_discovery]",
          "Document endpoints, request/response formats",
          "Preserve [existing_api_docs] modifications"
        ],
        "depends_on": [],
        "output": "api_docs"
      }
    ],
    "target_files": [".workflow/docs/${project_name}/api/README.md"]
  }
}
```
## Session Structure

**Unified Structure** (single JSON replaces multiple text files):

```
.workflow/
├── .active-WFS-docs-{timestamp}
└── WFS-docs-{timestamp}/
    ├── workflow-session.json          # Session metadata
    ├── IMPL_PLAN.md
    ├── TODO_LIST.md
    ├── .process/
    │   └── phase2-analysis.json       # All Phase 2 analysis data (replaces 7+ files)
    └── .task/
        ├── IMPL-001.json              # Small: all modules | Large: group 1
        ├── IMPL-00N.json              # (Large only: groups 2-N)
        ├── IMPL-{N+1}.json            # README (full mode)
        ├── IMPL-{N+2}.json            # ARCHITECTURE+EXAMPLES (full mode)
        └── IMPL-{N+3}.json            # HTTP API (optional)
```
**phase2-analysis.json Structure**:

```json
{
  "metadata": {
    "generated_at": "2025-11-03T16:41:06+08:00",
    "project_name": "Claude_dms3",
    "project_root": "/d/Claude_dms3"
  },
  "folder_analysis": [
    {"path": "./src/core", "type": "code", "code_count": 5, "dirs_count": 2},
    {"path": "./src/utils", "type": "navigation", "code_count": 0, "dirs_count": 4}
  ],
  "top_level_dirs": ["src/modules", "src/utils", "lib/core"],
  "existing_docs": {
    "file_list": [".workflow/docs/project/src/core/API.md"],
    "content": "... concatenated existing docs ..."
  },
  "unified_analysis": [
    {"module_path": "./src/core", "outline_summary": "Core functionality"}
  ],
  "groups": {
    "count": 4,
    "assignments": [
      {"group_id": "001", "directories": ["src/modules", "src/utils"], "doc_count": 6},
      {"group_id": "002", "directories": ["lib/core", "lib/helpers"], "doc_count": 7}
    ]
  },
  "statistics": {
    "total": 15,
    "code": 8,
    "navigation": 7,
    "top_level": 3
  }
}
```
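A few illustrative queries against this file (assuming the structure above; the session path is a placeholder):

```bash
analysis=".workflow/WFS-docs-{timestamp}/.process/phase2-analysis.json"

# Directories assigned to group 001
jq -r '.groups.assignments[] | select(.group_id == "001") | .directories[]' "$analysis"

# Code vs navigation folder counts
jq '{code: .statistics.code, navigation: .statistics.navigation}' "$analysis"
```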
**Workflow Session Structure** (workflow-session.json):

```json
{
  "session_id": "WFS-docs-{timestamp}",
  "project": "{project_name} documentation",
  "status": "planning",
  "timestamp": "2024-01-20T14:30:22+08:00",
  "path": ".",
  "target_path": "/path/to/project",
  "project_root": "/path/to/project",
  "project_name": "{project_name}",
  "mode": "full",
  "tool": "gemini",
  "cli_execute": false,
  "update_mode": "update",
  "existing_docs": 5,
  "analysis": {
    "total": "15",
    "code": "8",
    "navigation": "7",
    "top_level": "3"
  }
}
```
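For example (a sketch assuming the session file above), the executor can branch on the configured mode:

```bash
session=".workflow/WFS-docs-{timestamp}/workflow-session.json"
if [ "$(jq -r '.cli_execute' "$session")" = "true" ]; then
  echo "CLI mode: commands run in implementation_approach with --approval-mode yolo"
else
  echo "Agent mode: CLI is used only for pre_analysis"
fi
```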
## Generated Documentation

**Structure mirrors project source directories under a project-specific folder**:

```
.workflow/docs/
└── {project_name}/              # Project-specific root
    ├── src/                     # Mirrors src/ directory
    │   ├── modules/
    │   │   ├── README.md        # Navigation
    │   │   ├── auth/
    │   │   │   ├── API.md       # API signatures
    │   │   │   ├── README.md    # Module docs
    │   │   │   └── middleware/
    │   │   │       ├── API.md
    │   │   │       └── README.md
    │   │   └── api/
    │   │       ├── API.md
    │   │       └── README.md
    │   └── utils/
    │       └── README.md
    ├── lib/                     # Mirrors lib/ directory
    │   └── core/
    │       ├── API.md
    │       └── README.md
    ├── README.md                # Project root
    ├── ARCHITECTURE.md          # System design
    ├── EXAMPLES.md              # Usage examples
    └── api/                     # Optional
        └── README.md            # HTTP API reference
```
## Execution Commands

```bash
# Execute entire workflow (auto-discovers active session)
/workflow:execute

# Or specify session
/workflow:execute --resume-session="WFS-docs-yyyymmdd-hhmmss"

# Individual task execution
/task:execute IMPL-001
```
## Template Reference

**Available Templates** (`~/.claude/workflows/cli-templates/prompts/documentation/`):
- `api.txt`: Code API (Part A) + HTTP API (Part B)
- `module-readme.txt`: Module purpose, usage, dependencies
- `folder-navigation.txt`: Navigation README for folders with subdirectories
- `project-readme.txt`: Project overview, getting started, navigation
- `project-architecture.txt`: System structure, module map, design patterns
- `project-examples.txt`: End-to-end usage examples
## Execution Mode Summary

| Mode | CLI Placement | CLI MODE | Approval Flag | Agent Role |
|------|---------------|----------|---------------|------------|
| **Agent (default)** | pre_analysis | analysis | (none) | Generates documentation content |
| **CLI (--cli-execute)** | implementation_approach | write | --approval-mode yolo | Executes CLI commands, validates output |

**Execution Flow**:
- **Phase 2**: Unified analysis runs once; results go to `.process/`
- **Phase 4**: Dynamic grouping (max 2 dirs per group)
- **Level 1**: Parallel processing for module tree groups
- **Level 2+**: Sequential execution for project-level docs
## Related Commands

- `/workflow:execute` - Execute documentation tasks
- `/workflow:status` - View task progress
- `/workflow:session:complete` - Mark session complete
---
name: load-skill-memory
description: Activate a SKILL package (auto-detected from paths/keywords or manually specified) and intelligently load documentation based on task intent keywords
argument-hint: "[skill_name] \"task intent description\""
allowed-tools: Bash(*), Read(*), Skill(*)
---
# Memory Load SKILL Command (/memory:load-skill-memory)

## 1. Overview

The `memory:load-skill-memory` command **activates a SKILL package** (auto-detected from the task or manually specified) and intelligently loads documentation based on the user's task intent. The system automatically determines which documentation files to read from the intent description.

**Core Philosophy**:
- **Flexible Activation**: Auto-detect the skill from the task description/paths, or let the user specify it explicitly
- **Intent-Driven Loading**: The system analyzes task intent to determine documentation scope
- **Intelligent Selection**: Automatically chooses the appropriate documentation level and modules
- **Direct Context Loading**: Loads the selected documentation into conversation memory

**When to Use**:
- Manually activate a known SKILL package for a specific task
- Load SKILL context when the system hasn't auto-triggered it
- Force reload SKILL documentation with a specific intent focus

**Note**: Normal SKILL activation happens automatically via description triggers or path mentions (the system extracts the skill name from file paths for intelligent triggering). Use this command only when manual activation is needed.
## 2. Parameters

- `[skill_name]` (Optional): Name of the SKILL package to activate
  - If omitted: System auto-detects from the task description or file paths
  - If specified: Direct activation of the named SKILL package
  - Example: `my_project`, `api_service`
  - Must match a directory name under `.claude/skills/`

- `"task intent description"` (Required): Description of what you want to do
  - Used for both auto-detection (if skill_name is omitted) and documentation scope selection
  - **Analysis tasks**: "分析builder pattern实现" (analyze the builder pattern implementation), "理解参数系统架构" (understand the parameter system architecture)
  - **Modification tasks**: "修改workflow逻辑" (modify the workflow logic), "增强thermal template功能" (enhance the thermal template feature)
  - **Learning tasks**: "学习接口设计模式" (learn the interface design patterns), "了解测试框架使用" (understand how the test framework is used)
  - **With paths**: "修改D:\projects\my_project\src\auth.py的认证逻辑" (modify the auth logic in that file; auto-extracts `my_project`)
## 3. Execution Flow

### Step 1: Determine SKILL Name (if not provided)

**Auto-Detection Strategy** (when the skill_name parameter is omitted):
1. **Path Extraction**: Scan the task description for file paths
   - Extract potential project names from path segments
   - Example: `"修改D:\projects\my_project\src\auth.py"` → extracts `my_project`
2. **Keyword Matching**: Match task keywords against SKILL descriptions
   - Search for project-specific terms and domain keywords
3. **Validation**: Check whether the extracted name matches `.claude/skills/{skill_name}/`

**Result**: Either uses the provided skill_name or the auto-detected name for activation
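A minimal sketch of the path-extraction heuristic, assuming skill packages live under `.claude/skills/` as described above (the parsing rule is illustrative, not the exact implementation):

```bash
task='修改D:\projects\my_project\src\auth.py的认证逻辑'

# Heuristic: normalize backslashes, then take the path segment after the
# drive-letter root's "projects" folder as the candidate skill name
candidate=$(printf '%s\n' "$task" | tr '\\' '/' | grep -oE '[A-Za-z]:/[^ ]+' | cut -d'/' -f3)

if [ -d ".claude/skills/$candidate" ]; then
  echo "Auto-detected skill: $candidate"
fi
```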
### Step 2: Activate SKILL and Analyze Intent

**Activate SKILL Package**:
```javascript
Skill(command: "${skill_name}")  // Uses provided or auto-detected name
```

**What Happens After Activation**:
1. If the SKILL exists in memory: System reads `.claude/skills/${skill_name}/SKILL.md`
2. If the SKILL is not found in memory: Error - the SKILL package doesn't exist
3. SKILL description triggers are loaded into memory
4. The progressive loading mechanism becomes available
5. The documentation structure is now accessible

**Intent Analysis**:
Based on the task intent description, the system determines:
- **Action type**: analyzing, modifying, learning
- **Scope**: specific module, architecture overview, complete system
- **Depth**: quick reference, detailed API, full documentation
### Step 3: Intelligent Documentation Loading

**Loading Strategy**:

The system automatically selects documentation based on intent keywords:

1. **Quick Understanding** ("了解", "快速理解", "什么是"):
   - Load: Level 0 (README.md only, ~2K tokens)
   - Use case: Quick overview of capabilities

2. **Specific Module Analysis** ("分析XXX模块", "理解XXX实现"):
   - Load: Module-specific README.md + API.md (~5K tokens)
   - Use case: Deep dive into a specific component

3. **Architecture Review** ("架构", "设计模式", "整体结构"):
   - Load: README.md + ARCHITECTURE.md (~10K tokens)
   - Use case: System design understanding

4. **Implementation/Modification** ("修改", "增强", "实现"):
   - Load: Relevant module docs + EXAMPLES.md (~15K tokens)
   - Use case: Code modification with examples

5. **Comprehensive Learning** ("学习", "完整了解", "深入理解"):
   - Load: Level 3 (all documentation, ~40K tokens)
   - Use case: Complete system mastery

**Documentation Loaded into Memory**:
After loading, the selected documentation content is available in conversation memory for subsequent operations.
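An illustrative sketch of the keyword dispatch (a simplification: the real selection is model-driven rather than a fixed pattern match):

```bash
intent="$1"
docs=".workflow/docs/my_project"   # placeholder project

case "$intent" in
  *架构*|*设计模式*|*整体结构*) files="$docs/README.md $docs/ARCHITECTURE.md" ;;
  *修改*|*增强*|*实现*)          files="$docs/EXAMPLES.md" ;;   # plus the relevant module docs
  *学习*|*完整*|*深入*)          files=$(find "$docs" -name '*.md') ;;
  *)                              files="$docs/README.md" ;;
esac
echo "$files"
```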
## 4. Usage Examples

### Example 1: Manual Specification

**User Command**:
```bash
/memory:load-skill-memory my_project "修改认证模块增加OAuth支持"
```

**Execution**:
```javascript
// Step 1: Use provided skill_name
skill_name = "my_project"  // Directly from parameter

// Step 2: Activate SKILL
Skill(command: "my_project")

// Step 3: Intent Analysis
Keywords: ["修改", "认证模块", "增加", "OAuth"]
Action: modifying (implementation)
Scope: auth module + examples

// Load documentation based on intent
Read(.workflow/docs/my_project/auth/README.md)
Read(.workflow/docs/my_project/auth/API.md)
Read(.workflow/docs/my_project/EXAMPLES.md)
```
### Example 2: Auto-Detection from Path

**User Command**:
```bash
/memory:load-skill-memory "修改D:\projects\my_project\src\services\api.py的接口逻辑"
```

**Execution**:
```javascript
// Step 1: Auto-detect skill_name from path
Path detected: "D:\projects\my_project\src\services\api.py"
Extracted: "my_project"
Validated: .claude/skills/my_project/ exists ✓
skill_name = "my_project"

// Step 2: Activate SKILL
Skill(command: "my_project")

// Step 3: Intent Analysis
Keywords: ["修改", "services", "接口逻辑"]
Action: modifying (implementation)
Scope: services module + examples

// Load documentation based on intent
Read(.workflow/docs/my_project/services/README.md)
Read(.workflow/docs/my_project/services/API.md)
Read(.workflow/docs/my_project/EXAMPLES.md)
```
## 5. Intent Keyword Mapping

**Quick Reference**:
- **Triggers**: "了解", "快速", "什么是", "简介"
- **Loads**: README.md only (~2K)

**Module-Specific**:
- **Triggers**: "XXX模块", "XXX组件", "分析XXX"
- **Loads**: Module README + API (~5K)

**Architecture**:
- **Triggers**: "架构", "设计", "整体结构", "系统设计"
- **Loads**: README + ARCHITECTURE (~10K)

**Implementation**:
- **Triggers**: "修改", "增强", "实现", "开发", "集成"
- **Loads**: Relevant module + EXAMPLES (~15K)

**Comprehensive**:
- **Triggers**: "完整", "深入", "全面", "学习整个"
- **Loads**: All documentation (~40K)
argument-hint: "[--tool gemini|qwen] \"task context description\""
allowed-tools: Task(*), Bash(*)
examples:
  - /memory:load "在当前前端基础上开发用户认证功能"
  - /memory:load --tool qwen "重构支付模块API"
---
# Memory Load Command (/memory:load)

The command fully delegates to the **universal-executor agent**, which autonomously:
1. **Analyzes Project Structure**: Executes `get_modules_by_depth.sh` to understand architecture
2. **Loads Documentation**: Reads CLAUDE.md, README.md and other key docs
3. **Extracts Keywords**: Derives core keywords from the task description
4. **Discovers Files**: Uses CodexLens MCP or rg/find to locate relevant files
5. **CLI Deep Analysis**: Executes Gemini/Qwen CLI for deep context analysis
6. **Generates Content Package**: Returns a structured JSON core content package
1. **Project Structure**
   \`\`\`bash
   bash(ccw tool exec get_modules_by_depth '{}')
   \`\`\`

2. **Core Documentation**
Execute Gemini/Qwen CLI for deep analysis (saves main-thread tokens):

\`\`\`bash
ccw cli -p "
PURPOSE: Extract project core context for task: ${task_description}
TASK: Analyze project architecture, tech stack, key patterns, relevant files
MODE: analysis
RULES:
- Identify key architecture patterns and technical constraints
- Extract integration points and development standards
- Output concise, structured format
" --tool ${tool} --mode analysis
\`\`\`
### Step 4: Generate Core Content Package
### Example 2: Using Qwen Tool

```bash
/memory:load --tool qwen "重构支付模块API"
```

The agent uses the Qwen CLI for analysis and returns the same structured package.
---
name: skill-memory
description: "4-phase autonomous orchestrator: check docs → /memory:docs planning → /workflow:execute → generate SKILL.md with progressive loading index (skips phases 2-3 if docs exist)"
argument-hint: "[path] [--tool <gemini|qwen|codex>] [--regenerate] [--mode <full|partial>] [--cli-execute]"
allowed-tools: SlashCommand(*), TodoWrite(*), Bash(*), Read(*), Write(*)
---
# Memory SKILL Package Generator

## Orchestrator Role

**Pure Orchestrator**: Executes the documentation generation workflow, then generates the SKILL.md index. Does NOT create task JSON files.

**Auto-Continue Workflow**: This command runs **fully autonomously** once triggered. Each phase completes and automatically triggers the next phase without user interaction.

**Execution Paths**:
- **Full Path**: All 4 phases (no existing docs OR `--regenerate` specified)
- **Skip Path**: Phase 1 → Phase 4 (existing docs found AND no `--regenerate` flag)
- **Phase 4 Always Executes**: The SKILL.md index is never skipped; it is always generated or updated
## Core Rules

1. **Start Immediately**: First action is TodoWrite initialization, second action is Phase 1 execution
2. **No Task JSON**: This command does not create task JSON files - it delegates to /memory:docs
3. **Parse Every Output**: Extract required data from each command output (session_id, task_count, file paths)
4. **Auto-Continue**: After completing each phase, update TodoWrite and immediately execute the next phase
5. **Track Progress**: Update TodoWrite after EVERY phase completion before starting the next phase
6. **Direct Generation**: Phase 4 directly generates SKILL.md using the Write tool
7. **No Manual Steps**: The user should never be prompted for decisions between phases

---
## 4-Phase Execution

### Phase 1: Prepare Arguments

**Goal**: Parse command arguments and check existing documentation

**Step 1: Get Target Path and Project Name**
```bash
# Get current directory (or use provided path)
bash(pwd)

# Get project name from directory
bash(basename "$(pwd)")

# Get project root
bash(git rev-parse --show-toplevel 2>/dev/null || pwd)
```

**Output**:
- `target_path`: `/d/my_project`
- `project_name`: `my_project`
- `project_root`: `/d/my_project`
**Step 2: Set Default Parameters**
```bash
# Default values (use these unless user specifies otherwise):
# - tool: "gemini"
# - mode: "full"
# - regenerate: false (no --regenerate flag)
# - cli_execute: false (no --cli-execute flag)
```

**Step 3: Check Existing Documentation**
```bash
# Check if docs directory exists
bash(test -d .workflow/docs/my_project && echo "exists" || echo "not_exists")

# Count existing documentation files
bash(find .workflow/docs/my_project -name "*.md" 2>/dev/null | wc -l || echo 0)
```

**Output**:
- `docs_exists`: `exists` or `not_exists`
- `existing_docs`: `5` (or `0` if no docs)
**Step 4: Determine Execution Path**

**Decision Logic**:
```javascript
if (existing_docs > 0 && !regenerate_flag) {
  // Documentation exists and no regenerate flag
  SKIP_DOCS_GENERATION = true
  message = "Documentation already exists, skipping Phase 2 and Phase 3. Use --regenerate to force regeneration."
} else if (regenerate_flag) {
  // Force regeneration: delete existing docs
  bash(rm -rf .workflow/docs/my_project 2>/dev/null || true)
  SKIP_DOCS_GENERATION = false
  message = "Regenerating documentation from scratch."
} else {
  // No existing docs
  SKIP_DOCS_GENERATION = false
  message = "No existing documentation found, generating new documentation."
}
```
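The same decision expressed as shell logic (a sketch, assuming the Phase 1 variables above):

```bash
if [ "${EXISTING_DOCS:-0}" -gt 0 ] && [ "$REGENERATE" != "true" ]; then
  SKIP_DOCS_GENERATION=true
  echo "Documentation already exists, skipping Phase 2 and Phase 3. Use --regenerate to force regeneration."
elif [ "$REGENERATE" = "true" ]; then
  rm -rf ".workflow/docs/$PROJECT_NAME" 2>/dev/null || true
  SKIP_DOCS_GENERATION=false
  echo "Regenerating documentation from scratch."
else
  SKIP_DOCS_GENERATION=false
  echo "No existing documentation found, generating new documentation."
fi
```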
**Summary Variables**:
- `PROJECT_NAME`: `my_project`
- `TARGET_PATH`: `/d/my_project`
- `DOCS_PATH`: `.workflow/docs/my_project`
- `TOOL`: `gemini` (default) or user-specified
- `MODE`: `full` (default) or user-specified
- `CLI_EXECUTE`: `false` (default) or `true` if --cli-execute flag
- `REGENERATE`: `false` (default) or `true` if --regenerate flag
- `EXISTING_DOCS`: Count of existing documentation files
- `SKIP_DOCS_GENERATION`: `true` if skipping Phase 2/3, `false` otherwise

**Completion & TodoWrite**:
- If `SKIP_DOCS_GENERATION = true`: Mark phase 1 completed, phases 2 & 3 completed (skipped), phase 4 in_progress
- If `SKIP_DOCS_GENERATION = false`: Mark phase 1 completed, phase 2 in_progress

**Next Action**:
- If skipping: Display skip message → Jump to Phase 4 (SKILL.md generation)
- If not skipping: Display preparation results → Continue to Phase 2 (documentation planning)

---
### Phase 2: Call /memory:docs

**Skip Condition**: This phase is **skipped if SKIP_DOCS_GENERATION = true** (documentation already exists without the --regenerate flag)

**Goal**: Trigger the documentation generation workflow

**Command**:
```bash
SlashCommand(command="/memory:docs [targetPath] --tool [tool] --mode [mode] [--cli-execute]")
```

**Example**:
```bash
/memory:docs /d/my_app --tool gemini --mode full
/memory:docs /d/my_app --tool gemini --mode full --cli-execute
```

**Note**: The `--regenerate` flag is handled in Phase 1 by deleting existing documentation. This command always calls `/memory:docs` without the regenerate flag, relying on docs.md's built-in update detection.

**Parse Output**:
- Extract session ID: `WFS-docs-[timestamp]` (store as `docsSessionId`)
- Extract task count (store as `taskCount`)
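For instance (a sketch assuming the `.active-*` marker convention shown in the session structure earlier):

```bash
# The active-session marker encodes the session ID in its filename
docsSessionId=$(basename "$(ls .workflow/.active-WFS-docs-* 2>/dev/null | head -1)" | sed 's/^\.active-//')
taskCount=$(ls ".workflow/$docsSessionId/.task/"IMPL-*.json 2>/dev/null | wc -l)
echo "session=$docsSessionId tasks=$taskCount"
```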
**Completion Criteria**:
- `/memory:docs` command executed successfully
- Session ID extracted and stored
- Task count retrieved
- Task files created in `.workflow/[docsSessionId]/.task/`
- workflow-session.json exists

**TodoWrite**: Mark phase 2 completed, phase 3 in_progress

**Next Action**: Display docs planning results (session ID, task count) → Auto-continue to Phase 3

---
### Phase 3: Execute Documentation Generation

**Skip Condition**: This phase is **skipped if SKIP_DOCS_GENERATION = true** (documentation already exists without the --regenerate flag)

**Goal**: Execute documentation generation tasks

**Command**:
```bash
SlashCommand(command="/workflow:execute")
```

**Note**: `/workflow:execute` automatically discovers the active session from Phase 2

**Completion Criteria**:
- `/workflow:execute` command executed successfully
- Documentation files generated in `.workflow/docs/[projectName]/`
- All tasks marked as completed in the session
- At minimum: module documentation files exist (API.md and/or README.md)
- For full mode: Project README, ARCHITECTURE, EXAMPLES files generated

**TodoWrite**: Mark phase 3 completed, phase 4 in_progress

**Next Action**: Display execution results (file count, module count) → Auto-continue to Phase 4

---
### Phase 4: Generate SKILL.md Index

**Note**: This phase is **NEVER skipped** - it always executes to generate or update the SKILL index.

**Step 1: Read Key Files** (Use Read tool)
- `.workflow/docs/{project_name}/README.md` (required)
- `.workflow/docs/{project_name}/ARCHITECTURE.md` (optional)

**Step 2: Discover Structure**
```bash
bash(find .workflow/docs/{project_name} -name "*.md" | sed 's|.workflow/docs/{project_name}/||' | awk -F'/' '{if(NF>=2) print $1"/"$2}' | sort -u)
```
**Step 3: Generate Intelligent Description**

Extract from the README + structure: Function (capabilities), Modules (names), Keywords (API/CLI/auth/etc.)

**Format**: `{Project} {core capabilities} (located at {project_path}). Load this SKILL when analyzing, modifying, or learning about {domain_description} or files under this path, especially when no relevant context exists in memory.`

**Key Elements**:
- **Path Reference**: Use `TARGET_PATH` from Phase 1 for precise location identification
- **Domain Description**: Extract a human-readable domain/feature area from the README (e.g., "workflow management", "thermal modeling")
- **Trigger Optimization**: Include the project path; emphasize "especially when no relevant context exists in memory"
- **Action Coverage**: analyzing (分析), modifying (修改), learning (了解)

**Example**: "Workflow orchestration system with CLI tools and documentation generation (located at /d/Claude_dms3). Load this SKILL when analyzing, modifying, or learning about workflow management or files under this path, especially when no relevant context exists in memory."
**Step 4: Write SKILL.md** (Use Write tool)
```bash
bash(mkdir -p .claude/skills/{project_name})
```

`.claude/skills/{project_name}/SKILL.md`:
```yaml
---
name: {project_name}
description: {intelligent description from Step 3}
version: 1.0.0
---
# {Project Name} SKILL Package

## Documentation: `../../../.workflow/docs/{project_name}/`

## Progressive Loading
### Level 0: Quick Start (~2K)
- [README](../../../.workflow/docs/{project_name}/README.md)
### Level 1: Core Modules (~8K)
{Module READMEs}
### Level 2: Complete (~25K)
All modules + [Architecture](../../../.workflow/docs/{project_name}/ARCHITECTURE.md)
### Level 3: Deep Dive (~40K)
Everything + [Examples](../../../.workflow/docs/{project_name}/EXAMPLES.md)
```
**Completion Criteria**:
- SKILL.md file created at `.claude/skills/{project_name}/SKILL.md`
- Intelligent description generated from documentation
- Progressive loading levels (0-3) properly structured
- Module index includes all documented modules
- All file references use relative paths

**TodoWrite**: Mark phase 4 completed

**Final Action**: Report completion summary to user
**Return to User**:
```
SKILL Package Generation Complete

Project: {project_name}
Documentation: .workflow/docs/{project_name}/ ({doc_count} files)
SKILL Index: .claude/skills/{project_name}/SKILL.md

Generated:
- {task_count} documentation tasks completed
- SKILL.md with progressive loading (4 levels)
- Module index with {module_count} modules

Usage:
- Load Level 0: Quick project overview (~2K tokens)
- Load Level 1: Core modules (~8K tokens)
- Load Level 2: Complete docs (~25K tokens)
- Load Level 3: Everything (~40K tokens)
```

---
## Implementation Details

### Critical Rules

1. **No User Prompts Between Phases**: Never ask the user questions or wait for input between phases
2. **Immediate Phase Transition**: After a TodoWrite update, immediately execute the next phase command
3. **Status-Driven Execution**: Check TodoList status after each phase:
   - If the next task is "pending" → Mark it "in_progress" and execute
   - If all tasks are "completed" → Report the final summary
4. **Phase Completion Pattern**:
   ```
   Phase N completes → Update TodoWrite (N=completed, N+1=in_progress) → Execute Phase N+1
   ```
### TodoWrite Patterns

#### Initialization (Before Phase 1)

**FIRST ACTION**: Create a TodoList with all 4 phases
```javascript
TodoWrite({todos: [
  {"content": "Parse arguments and prepare", "status": "in_progress", "activeForm": "Parsing arguments"},
  {"content": "Call /memory:docs to plan documentation", "status": "pending", "activeForm": "Calling /memory:docs"},
  {"content": "Execute documentation generation", "status": "pending", "activeForm": "Executing documentation"},
  {"content": "Generate SKILL.md index", "status": "pending", "activeForm": "Generating SKILL.md"}
]})
```

**SECOND ACTION**: Execute Phase 1 immediately
#### Full Path (SKIP_DOCS_GENERATION = false)

**After Phase 1**:
```javascript
TodoWrite({todos: [
  {"content": "Parse arguments and prepare", "status": "completed", "activeForm": "Parsing arguments"},
  {"content": "Call /memory:docs to plan documentation", "status": "in_progress", "activeForm": "Calling /memory:docs"},
  {"content": "Execute documentation generation", "status": "pending", "activeForm": "Executing documentation"},
  {"content": "Generate SKILL.md index", "status": "pending", "activeForm": "Generating SKILL.md"}
]})
// Auto-continue to Phase 2
```

**After Phase 2**:
```javascript
TodoWrite({todos: [
  {"content": "Parse arguments and prepare", "status": "completed", "activeForm": "Parsing arguments"},
  {"content": "Call /memory:docs to plan documentation", "status": "completed", "activeForm": "Calling /memory:docs"},
  {"content": "Execute documentation generation", "status": "in_progress", "activeForm": "Executing documentation"},
  {"content": "Generate SKILL.md index", "status": "pending", "activeForm": "Generating SKILL.md"}
]})
// Auto-continue to Phase 3
```

**After Phase 3**:
```javascript
TodoWrite({todos: [
  {"content": "Parse arguments and prepare", "status": "completed", "activeForm": "Parsing arguments"},
  {"content": "Call /memory:docs to plan documentation", "status": "completed", "activeForm": "Calling /memory:docs"},
  {"content": "Execute documentation generation", "status": "completed", "activeForm": "Executing documentation"},
  {"content": "Generate SKILL.md index", "status": "in_progress", "activeForm": "Generating SKILL.md"}
]})
// Auto-continue to Phase 4
```

**After Phase 4**:
```javascript
TodoWrite({todos: [
  {"content": "Parse arguments and prepare", "status": "completed", "activeForm": "Parsing arguments"},
  {"content": "Call /memory:docs to plan documentation", "status": "completed", "activeForm": "Calling /memory:docs"},
  {"content": "Execute documentation generation", "status": "completed", "activeForm": "Executing documentation"},
  {"content": "Generate SKILL.md index", "status": "completed", "activeForm": "Generating SKILL.md"}
]})
// Report completion summary to user
```
#### Skip Path (SKIP_DOCS_GENERATION = true)

**After Phase 1** (detects existing docs, skips Phase 2 & 3):
```javascript
TodoWrite({todos: [
  {"content": "Parse arguments and prepare", "status": "completed", "activeForm": "Parsing arguments"},
  {"content": "Call /memory:docs to plan documentation", "status": "completed", "activeForm": "Calling /memory:docs"},
  {"content": "Execute documentation generation", "status": "completed", "activeForm": "Executing documentation"},
  {"content": "Generate SKILL.md index", "status": "in_progress", "activeForm": "Generating SKILL.md"}
]})
// Display skip message: "Documentation already exists, skipping Phase 2 and Phase 3. Use --regenerate to force regeneration."
// Jump directly to Phase 4
```

**After Phase 4**:
```javascript
TodoWrite({todos: [
  {"content": "Parse arguments and prepare", "status": "completed", "activeForm": "Parsing arguments"},
  {"content": "Call /memory:docs to plan documentation", "status": "completed", "activeForm": "Calling /memory:docs"},
  {"content": "Execute documentation generation", "status": "completed", "activeForm": "Executing documentation"},
  {"content": "Generate SKILL.md index", "status": "completed", "activeForm": "Generating SKILL.md"}
]})
// Report completion summary to user
```
### Execution Flow Diagrams

#### Full Path Flow
```
User triggers command
  ↓
[TodoWrite] Initialize 4 phases (Phase 1 = in_progress)
  ↓
[Execute] Phase 1: Parse arguments
  ↓
[TodoWrite] Phase 1 = completed, Phase 2 = in_progress
  ↓
[Execute] Phase 2: Call /memory:docs
  ↓
[TodoWrite] Phase 2 = completed, Phase 3 = in_progress
  ↓
[Execute] Phase 3: Call /workflow:execute
  ↓
[TodoWrite] Phase 3 = completed, Phase 4 = in_progress
  ↓
[Execute] Phase 4: Generate SKILL.md
  ↓
[TodoWrite] Phase 4 = completed
  ↓
[Report] Display completion summary
```

#### Skip Path Flow
```
User triggers command
  ↓
[TodoWrite] Initialize 4 phases (Phase 1 = in_progress)
  ↓
[Execute] Phase 1: Parse arguments, detect existing docs
  ↓
[TodoWrite] Phase 1 = completed, Phase 2&3 = completed (skipped), Phase 4 = in_progress
  ↓
[Display] Skip message: "Documentation already exists, skipping Phase 2 and Phase 3"
  ↓
[Execute] Phase 4: Generate SKILL.md (always runs)
  ↓
[TodoWrite] Phase 4 = completed
  ↓
[Report] Display completion summary
```
### Error Handling

- If any phase fails, mark it as "in_progress" (not completed)
- Report error details to the user
- Do NOT auto-continue to the next phase on failure

---
## Parameters

```bash
/memory:skill-memory [path] [--tool <gemini|qwen|codex>] [--regenerate] [--mode <full|partial>] [--cli-execute]
```

- **path**: Target directory (default: current directory)
- **--tool**: CLI tool for documentation (default: gemini)
  - `gemini`: Comprehensive documentation
  - `qwen`: Architecture analysis
  - `codex`: Implementation validation
- **--regenerate**: Force regenerate all documentation
  - When enabled: Deletes existing `.workflow/docs/{project_name}/` before regeneration
  - Ensures fresh documentation from source code
- **--mode**: Documentation mode (default: full)
  - `full`: Complete docs (modules + README + ARCHITECTURE + EXAMPLES)
  - `partial`: Module docs only
- **--cli-execute**: Enable CLI-based documentation generation (optional)
  - When enabled: CLI generates docs directly in implementation_approach
  - When disabled (default): Agent generates documentation content

---
## Examples

### Example 1: Generate SKILL Package (Default)

```bash
/memory:skill-memory
```

**Workflow**:
1. Phase 1: Detects current directory, checks existing docs
2. Phase 2: Calls `/memory:docs . --tool gemini --mode full` (Agent Mode)
3. Phase 3: Executes documentation generation via `/workflow:execute`
4. Phase 4: Generates SKILL.md at `.claude/skills/{project_name}/SKILL.md`

### Example 2: Regenerate with Qwen

```bash
/memory:skill-memory /d/my_app --tool qwen --regenerate
```

**Workflow**:
1. Phase 1: Parses target path, detects regenerate flag, deletes existing docs
2. Phase 2: Calls `/memory:docs /d/my_app --tool qwen --mode full`
3. Phase 3: Executes documentation regeneration
4. Phase 4: Generates updated SKILL.md

### Example 3: Partial Mode (Modules Only)

```bash
/memory:skill-memory --mode partial
```

**Workflow**:
1. Phase 1: Detects partial mode
2. Phase 2: Calls `/memory:docs . --tool gemini --mode partial` (Agent Mode)
3. Phase 3: Executes module documentation only
4. Phase 4: Generates SKILL.md with a module-only index

### Example 4: CLI Execute Mode

```bash
/memory:skill-memory --cli-execute
```

**Workflow**:
1. Phase 1: Detects CLI execute mode
2. Phase 2: Calls `/memory:docs . --tool gemini --mode full --cli-execute` (CLI Mode)
3. Phase 3: Executes CLI-based documentation generation
4. Phase 4: Generates SKILL.md at `.claude/skills/{project_name}/SKILL.md`

### Example 5: Skip Path (Existing Docs)

```bash
/memory:skill-memory
```

**Scenario**: Documentation already exists in `.workflow/docs/{project_name}/`

**Workflow**:
1. Phase 1: Detects existing docs (5 files), sets SKIP_DOCS_GENERATION = true
2. Display: "Documentation already exists, skipping Phase 2 and Phase 3. Use --regenerate to force regeneration."
3. Phase 4: Generates or updates the SKILL.md index only (~5-10x faster)

---
## Benefits

- **Pure Orchestrator**: No task JSON generation; delegates to /memory:docs
- **Auto-Continue**: Autonomous 4-phase execution without user interaction
- **Intelligent Skip**: Detects existing docs and skips regeneration for fast SKILL updates
- **Always Fresh Index**: Phase 4 always executes to keep SKILL.md synchronized
- **Simplified**: ~70% less code than the previous version
- **Maintainable**: Changes to /memory:docs automatically apply
- **Direct Generation**: Phase 4 directly writes SKILL.md
- **Flexible**: Supports all /memory:docs options (tool, mode, cli-execute)
## Architecture

```
skill-memory (orchestrator)
  ├─ Phase 1: Prepare (bash commands, skip decision)
  ├─ Phase 2: /memory:docs (task planning, skippable)
  ├─ Phase 3: /workflow:execute (task execution, skippable)
  └─ Phase 4: Write SKILL.md (direct file generation, always runs)

No task JSON created by this command
All documentation tasks managed by /memory:docs
Smart skip logic: 5-10x faster when docs exist
```
**Key Features**:
- Extracts primary design references (colors, typography, spacing, etc.)
- Provides dynamic adjustment guidelines for design tokens
- Includes prerequisites and tooling requirements (browsers, PostCSS, dark mode)
- Progressive loading structure for efficient token usage
- Complete implementation examples with React components
- Interactive preview showcase

---

## Quick Reference

### Command Syntax
```bash
/memory:style-skill-memory
```
### Key Variables

**Input Variables**:
- `PACKAGE_NAME`: Style reference package name
- `PACKAGE_DIR`: `.workflow/reference_style/${package_name}`
- `SKILL_DIR`: `.claude/skills/style-${package_name}`
- `REGENERATE`: `true` if --regenerate flag, `false` otherwise

**Data Sources** (Phase 2):
- `DESIGN_TOKENS_DATA`: Complete design-tokens.json content (from Read)
- `LAYOUT_TEMPLATES_DATA`: Complete layout-templates.json content (from Read)
- `ANIMATION_TOKENS_DATA`: Complete animation-tokens.json content (from Read, if it exists)

**Metadata** (Phase 2):
- `COMPONENT_COUNT`: Total components
- `UNIVERSAL_COUNT`: Universal components count
- `SPECIALIZED_COUNT`: Specialized components count
- `UNIVERSAL_COMPONENTS`: Universal component names (first 5)
- `HAS_ANIMATIONS`: Whether animation-tokens.json exists

**Analysis Output** (`DESIGN_ANALYSIS` - Phase 2):
- `has_colors`: Colors exist
- `color_semantic`: Has semantic naming (primary/secondary/accent)
- `uses_oklch`: Uses modern color spaces (oklch, lab, etc.)
- `has_dark_mode`: Has separate light/dark mode color tokens
- `spacing_pattern`: Pattern type ("linear", "geometric", "custom")
- `spacing_scale`: Actual scale values (e.g., [4, 8, 16, 32, 64])
- `has_typography`: Typography system exists
- `typography_hierarchy`: Has a size scale for hierarchy
- `uses_calc`: Uses calc() expressions in token values
- `has_radius`: Border radius exists
- `radius_style`: Style characteristic ("sharp" <4px, "moderate" 4-8px, "rounded" >8px)
- `has_shadows`: Shadow system exists
- `shadow_pattern`: Elevation naming pattern
- `has_animations`: Animation tokens exist
- `animation_range`: Duration range (fast to slow)
- `easing_variety`: Types of easing functions
### Common Errors

| Error | Cause | Resolution |
|-------|-------|------------|
| Package not found | Invalid package name or doesn't exist | Run `/workflow:ui-design:codify-style` first |
| SKILL already exists | SKILL.md already generated | Use `--regenerate` flag |
| Missing layout-templates.json | Incomplete package | Verify package integrity, re-run codify-style |
| Invalid JSON format | Corrupted package files | Regenerate package with codify-style |

---
## Execution Process

### Phase 1: Validate Package

**Purpose**: Check if the style reference package exists

**TodoWrite** (First Action):
```json
[
  {
    "content": "Validate package exists and check SKILL status",
    "activeForm": "Validating package and SKILL status",
    "status": "in_progress"
  },
  {
    "content": "Read package data and analyze design system",
    "activeForm": "Reading package data and analyzing design system",
    "status": "pending"
  },
  {
    "content": "Generate SKILL.md with design principles and token values",
    "activeForm": "Generating SKILL.md with design principles and token values",
    "status": "pending"
  }
]
```
```bash
bash(echo "${package_name:-$(basename "$(pwd)" | sed 's/^style-//')}")
```

Store result as `package_name`
**Step 2: Validate Package Exists**

```bash
bash(test -d .workflow/reference_style/${package_name} && echo "exists" || echo "not_exists")
```
**Summary Variables**:
- `PACKAGE_NAME`: Style reference package name
- `PACKAGE_DIR`: `.workflow/reference_style/${package_name}`
- `SKILL_DIR`: `.claude/skills/style-${package_name}`
- `REGENERATE`: `true` if --regenerate flag, `false` otherwise

**TodoWrite Update**: Mark "Validate" as completed, "Read package data" as in_progress

---
### Phase 2: Read Package Data & Analyze Design System

**Purpose**: Extract package information and primary design references for SKILL description generation
**Step 1: Count Components**
|
||||
**Step 1: Read All JSON Files**
|
||||
|
||||
```bash
|
||||
bash(jq '.layout_templates | length' .workflow/reference_style/${package_name}/layout-templates.json 2>/dev/null || echo 0)
|
||||
```
|
||||
# Read layout templates
|
||||
Read(file_path=".workflow/reference_style/${package_name}/layout-templates.json")
|
||||
|
||||
Store result as `component_count`
|
||||
|
||||
**Step 2: Extract Component Types and Classification**
|
||||
|
||||
```bash
|
||||
# Extract component names from layout templates
|
||||
bash(jq -r '.layout_templates | keys[]' .workflow/reference_style/${package_name}/layout-templates.json 2>/dev/null | head -10)
|
||||
|
||||
# Count universal vs specialized components
|
||||
bash(jq '[.layout_templates[] | select(.component_type == "universal")] | length' .workflow/reference_style/${package_name}/layout-templates.json 2>/dev/null || echo 0)
|
||||
bash(jq '[.layout_templates[] | select(.component_type == "specialized")] | length' .workflow/reference_style/${package_name}/layout-templates.json 2>/dev/null || echo 0)
|
||||
|
||||
# Extract universal component names only
|
||||
bash(jq -r '.layout_templates | to_entries | map(select(.value.component_type == "universal")) | .[].key' .workflow/reference_style/${package_name}/layout-templates.json 2>/dev/null | head -10)
|
||||
```
|
||||
|
||||
Store as:
|
||||
- `COMPONENT_TYPES`: List of available component types (all)
|
||||
- `UNIVERSAL_COUNT`: Number of universal (reusable) components
|
||||
- `SPECIALIZED_COUNT`: Number of specialized (project-specific) components
|
||||
- `UNIVERSAL_COMPONENTS`: List of universal component names
|
||||
|
||||
**Step 3: Read Design Tokens**
|
||||
|
||||
```bash
|
||||
# Read design tokens
|
||||
Read(file_path=".workflow/reference_style/${package_name}/design-tokens.json")
|
||||
|
||||
# Read animation tokens (if exists)
|
||||
bash(test -f .workflow/reference_style/${package_name}/animation-tokens.json && echo "exists" || echo "missing")
|
||||
Read(file_path=".workflow/reference_style/${package_name}/animation-tokens.json") # if exists
|
||||
```
|
||||
|
||||
**Extract Primary Design References**:
|
||||
|
||||
**Colors** (top 3-5 most important):
|
||||
```bash
|
||||
bash(jq -r '.colors | to_entries | .[0:5] | .[] | "\(.key): \(.value)"' .workflow/reference_style/${package_name}/design-tokens.json 2>/dev/null | head -5)
|
||||
```
|
||||
|
||||
**Typography** (heading and body fonts):
|
||||
```bash
|
||||
bash(jq -r '.typography | to_entries | select(.key | contains("family")) | .[] | "\(.key): \(.value)"' .workflow/reference_style/${package_name}/design-tokens.json 2>/dev/null)
|
||||
```
|
||||
|
||||
**Spacing Scale** (base spacing values):
|
||||
```bash
|
||||
bash(jq -r '.spacing | to_entries | .[0:5] | .[] | "\(.key): \(.value)"' .workflow/reference_style/${package_name}/design-tokens.json 2>/dev/null)
|
||||
```
|
||||
|
||||
**Border Radius** (base radius values):
|
||||
```bash
|
||||
bash(jq -r '.border_radius | to_entries | .[] | "\(.key): \(.value)"' .workflow/reference_style/${package_name}/design-tokens.json 2>/dev/null)
|
||||
```
|
||||
|
||||
**Shadows** (elevation levels):
|
||||
```bash
|
||||
bash(jq -r '.shadows | to_entries | .[0:3] | .[] | "\(.key): \(.value)"' .workflow/reference_style/${package_name}/design-tokens.json 2>/dev/null)
|
||||
```
|
||||
|
||||
Store extracted references as:
|
||||
- `PRIMARY_COLORS`: List of primary color tokens
|
||||
- `TYPOGRAPHY_FONTS`: Font family tokens
|
||||
- `SPACING_SCALE`: Base spacing values
|
||||
- `BORDER_RADIUS`: Radius values
|
||||
- `SHADOWS`: Shadow definitions
|
||||
|
||||
**Step 4: Read Animation Tokens (if available)**
|
||||
**Step 2: Extract Metadata for Description**
|
||||
|
||||
```bash
|
||||
# Check if animation tokens exist
|
||||
bash(test -f .workflow/reference_style/${package_name}/animation-tokens.json && echo "available" || echo "not_available")
|
||||
# Count components and classify by type
|
||||
bash(jq '.layout_templates | length' layout-templates.json)
|
||||
bash(jq '[.layout_templates[] | select(.component_type == "universal")] | length' layout-templates.json)
|
||||
bash(jq '[.layout_templates[] | select(.component_type == "specialized")] | length' layout-templates.json)
|
||||
bash(jq -r '.layout_templates | to_entries[] | select(.value.component_type == "universal") | .key' layout-templates.json | head -5)
|
||||
```
|
||||
|
||||
If available, extract:
|
||||
```bash
|
||||
Read(file_path=".workflow/reference_style/${package_name}/animation-tokens.json")
|
||||
Store results in metadata variables (see [Key Variables](#key-variables))
|
||||
|
||||
# Extract primary animation values
|
||||
bash(jq -r '.duration | to_entries | .[] | "\(.key): \(.value)"' .workflow/reference_style/${package_name}/animation-tokens.json 2>/dev/null)
|
||||
bash(jq -r '.easing | to_entries | .[0:3] | .[] | "\(.key): \(.value)"' .workflow/reference_style/${package_name}/animation-tokens.json 2>/dev/null)
|
||||
```
|
||||
**Step 3: Analyze Design System for Dynamic Principles**
|
||||
|
||||
Store as:
|
||||
- `ANIMATION_DURATIONS`: Animation duration tokens
|
||||
- `EASING_FUNCTIONS`: Easing function tokens
|
||||
|
||||
**Step 5: Count Files**
|
||||
Analyze design-tokens.json to extract characteristics and patterns:
|
||||
|
||||
```bash
# Color system characteristics
bash(jq '.colors | keys' design-tokens.json)
bash(jq '.colors | to_entries[0:2] | map(.value)' design-tokens.json)
# Check for modern color spaces
bash(jq '.colors | to_entries[] | .value | test("oklch|lab|lch")' design-tokens.json)
# Check for dark mode variants
bash(jq '.colors | keys | map(select(contains("dark") or contains("light")))' design-tokens.json)
# → Store: has_colors, color_semantic, uses_oklch, has_dark_mode

# Spacing pattern detection
bash(jq '.spacing | to_entries | map(.value) | map(gsub("[^0-9.]"; "") | tonumber)' design-tokens.json)
# Analyze pattern: linear (4-8-12-16) vs geometric (4-8-16-32) vs custom
# → Store: spacing_pattern, spacing_scale

# Typography characteristics
bash(jq '.typography | keys | map(select(contains("family") or contains("weight")))' design-tokens.json)
bash(jq '.typography | to_entries | map(select(.key | contains("size"))) | .[].value' design-tokens.json)
# Check for calc() usage
bash(jq '. | tostring | test("calc\\(")' design-tokens.json)
# → Store: has_typography, typography_hierarchy, uses_calc

# Border radius style
bash(jq '.border_radius | to_entries | map(.value)' design-tokens.json)
# Check range: small (sharp <4px) vs moderate (4-8px) vs large (rounded >8px)
# → Store: has_radius, radius_style

# Shadow characteristics
bash(jq '.shadows | keys' design-tokens.json)
bash(jq '.shadows | to_entries[0].value' design-tokens.json)
# → Store: has_shadows, shadow_pattern

# Animations (if available)
bash(jq '.duration | to_entries | map(.value)' animation-tokens.json)
bash(jq '.easing | keys' animation-tokens.json)
# → Store: has_animations, animation_range, easing_variety
```

Store analysis results in `DESIGN_ANALYSIS` (see [Key Variables](#key-variables)).

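The linear vs. geometric spacing decision above can be made mechanically by comparing successive differences and ratios. A minimal sketch (the `classify_spacing` helper is an illustrative stand-in, not part of the command spec):

```bash
# Classify a numeric spacing scale as linear, geometric, or custom.
# Input: whitespace-separated values, e.g. "4 8 12 16" or "4 8 16 32"
classify_spacing() {
  local vals=($1)
  local linear=1 geometric=1
  for ((i = 2; i < ${#vals[@]}; i++)); do
    # Linear: constant difference between neighbours
    (( vals[i] - vals[i-1] != vals[1] - vals[0] )) && linear=0
    # Geometric: constant ratio, tested via cross-multiplication
    (( vals[i-1] * vals[1] != vals[i] * vals[0] )) && geometric=0
  done
  if (( linear )); then echo "linear"
  elif (( geometric )); then echo "geometric"
  else echo "custom"; fi
}

classify_spacing "4 8 12 16"   # → linear
classify_spacing "4 8 16 32"   # → geometric
```
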
**Step 4: Read Animation Tokens (if available)**

```bash
# Check if animation tokens exist
bash(test -f .workflow/reference_style/${package_name}/animation-tokens.json && echo "available" || echo "not_available")
```

If available, extract:

```bash
Read(file_path=".workflow/reference_style/${package_name}/animation-tokens.json")

# Extract primary animation values
bash(jq -r '.duration | to_entries | .[] | "\(.key): \(.value)"' .workflow/reference_style/${package_name}/animation-tokens.json 2>/dev/null)
bash(jq -r '.easing | to_entries | .[0:3] | .[] | "\(.key): \(.value)"' .workflow/reference_style/${package_name}/animation-tokens.json 2>/dev/null)
```

Store as:
- `ANIMATION_DURATIONS`: Animation duration tokens
- `EASING_FUNCTIONS`: Easing function tokens

**Step 5: Count Files**

```bash
bash(cd .workflow/reference_style/${package_name} && ls -1 *.json *.html *.css 2>/dev/null | wc -l)
```

Store result as `file_count`.

**Summary Data Collected**:
- `COMPONENT_COUNT`: Number of components in layout templates
- `UNIVERSAL_COUNT`: Number of universal (reusable) components
- `SPECIALIZED_COUNT`: Number of specialized (project-specific) components
- `COMPONENT_TYPES`: List of component types (first 10)
- `UNIVERSAL_COMPONENTS`: List of universal component names (first 10)
- `FILE_COUNT`: Total files in package
- `HAS_ANIMATIONS`: Whether animation tokens are available
- `PRIMARY_COLORS`: Primary color tokens with values
- `TYPOGRAPHY_FONTS`: Font family tokens
- `SPACING_SCALE`: Base spacing scale
- `BORDER_RADIUS`: Border radius values
- `SHADOWS`: Shadow definitions
- `ANIMATION_DURATIONS`: Animation durations (if available)
- `EASING_FUNCTIONS`: Easing functions (if available)

**Note**: Analysis focuses on characteristics and patterns, not counts. Include technical feature detection (oklch, calc, dark mode) for the Prerequisites section.

**TodoWrite Update**: Mark "Read package data" as completed, "Generate SKILL.md" as in_progress:

```json
[
  {"content": "Read package data and extract design references", "status": "completed", "activeForm": "Reading package data"},
  {"content": "Generate SKILL.md with progressive loading", "status": "in_progress", "activeForm": "Generating SKILL.md"}
]
```

---

### Phase 3: Generate SKILL.md

**Purpose**: Create SKILL memory index with progressive loading structure and design references

**Step 1: Create SKILL Directory**

```bash
bash(mkdir -p .claude/skills/style-${package_name})
```

**Step 2: Generate Intelligent Description**

**Template**:
```
{package_name} project-independent design system with {universal_count} universal layout templates and interactive preview (located at .workflow/reference_style/{package_name}). Load when working with reusable UI components, design tokens, layout patterns, or implementing visual consistency. Excludes {specialized_count} project-specific components.
```

**Key Elements**:
- **Universal Count**: Emphasize available reusable layout templates
- **Project Independence**: Clearly state the project-independent nature
- **Specialized Exclusion**: Mention excluded project-specific components
- **Path Reference**: Precise package location
- **Trigger Keywords**: reusable UI components, design tokens, layout patterns, visual consistency
- **Action Coverage**: working with, analyzing, implementing

**Example**:
```
main-app-style-v1 project-independent design system with 5 universal layout templates and interactive preview (located at .workflow/reference_style/main-app-style-v1). Load when working with reusable UI components, design tokens, layout patterns, or implementing visual consistency. Excludes 3 project-specific components.
```

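A minimal sketch of assembling this description string from the Phase 2 variables (a plain `printf` substitution; the variable names follow the Key Variables list, everything else is illustrative):

```bash
# Assumes package_name, UNIVERSAL_COUNT, SPECIALIZED_COUNT were set in Phase 2
description=$(printf '%s project-independent design system with %s universal layout templates and interactive preview (located at .workflow/reference_style/%s). Load when working with reusable UI components, design tokens, layout patterns, or implementing visual consistency. Excludes %s project-specific components.' \
  "$package_name" "$UNIVERSAL_COUNT" "$package_name" "$SPECIALIZED_COUNT")
echo "$description"
```
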
**Step 3: Write SKILL.md**

Use Write tool to generate SKILL.md with the following complete content:

```markdown
---
name: style-{package_name}
description: {intelligent description from Step 2}
---

# {Package Name} Style SKILL Package

## Documentation: `../../../.workflow/reference_style/{package_name}/`

## Package Overview

**Project-independent style reference package** extracted from codebase with reusable design patterns, tokens, and interactive preview.

**Package Details**:
- Package: {package_name}
- Layout Templates: {component_count} total
- **Universal Components**: {universal_count} (reusable, project-independent)
- **Specialized Components**: {specialized_count} (project-specific, excluded from reference)
- Universal Component Types: {comma-separated list of UNIVERSAL_COMPONENTS}
- Files: {file_count}
- Animation Tokens: {has_animations ? "✓ Available" : "Not available"}

**⚠️ IMPORTANT - Project Independence**:
This SKILL package represents a **pure style system** independent of any specific project implementation:
- **Universal components** are generic, reusable patterns (buttons, inputs, cards, navigation)
- **Specialized components** are project-specific implementations (excluded from this reference)
- All design tokens and layout patterns are extracted for **reference purposes only**
- Adapt and customize these references based on your project's specific requirements

---

## ⚡ Primary Design References

**IMPORTANT**: These are **reference values** extracted from the codebase. They should be **dynamically adjusted** based on your specific design needs, not treated as fixed constraints.

### 🎨 Colors

{FOR each color in PRIMARY_COLORS:
- **{color.key}**: `{color.value}`
}

**Usage Guidelines**:
- These colors establish the foundation of the design system
- Adjust saturation, lightness, or hue based on:
  - Brand requirements and accessibility needs
  - Context (light/dark mode, high-contrast themes)
  - User feedback and A/B testing results
- Use color theory principles to maintain harmony when modifying

### 📝 Typography

{FOR each font in TYPOGRAPHY_FONTS:
- **{font.key}**: `{font.value}`
}

**Usage Guidelines**:
- Font families can be substituted based on:
  - Brand identity and design language
  - Performance requirements (web fonts vs. system fonts)
  - Accessibility and readability considerations
  - Platform-specific availability
- Maintain hierarchy and scale relationships when changing fonts

### 📏 Spacing Scale

{FOR each spacing in SPACING_SCALE:
- **{spacing.key}**: `{spacing.value}`
}

**Usage Guidelines**:
- Spacing values form a consistent rhythm system
- Adjust scale based on:
  - Target device (mobile vs. desktop vs. tablet)
  - Content density requirements
  - Component-specific needs (compact vs. comfortable layouts)
- Maintain proportional relationships when scaling

### 🔲 Border Radius

{FOR each radius in BORDER_RADIUS:
- **{radius.key}**: `{radius.value}`
}

**Usage Guidelines**:
- Border radius affects visual softness and modernity
- Adjust based on:
  - Design aesthetic (sharp vs. rounded vs. pill-shaped)
  - Component type (buttons, cards, inputs have different needs)
  - Platform conventions (iOS vs. Android vs. Web)

### 🌫️ Shadows

{FOR each shadow in SHADOWS:
- **{shadow.key}**: `{shadow.value}`
}

**Usage Guidelines**:
- Shadows create elevation and depth perception
- Adjust based on:
  - Material design depth levels
  - Light/dark mode contexts
  - Performance considerations (complex shadows impact rendering)
  - Visual hierarchy needs

{IF HAS_ANIMATIONS:
### ⏱️ Animation & Timing

**Durations**:
{FOR each duration in ANIMATION_DURATIONS:
- **{duration.key}**: `{duration.value}`
}

**Easing Functions**:
{FOR each easing in EASING_FUNCTIONS:
- **{easing.key}**: `{easing.value}`
}

**Usage Guidelines**:
- Animation timing affects perceived responsiveness and polish
- Adjust based on:
  - User expectations and platform conventions
  - Accessibility preferences (reduced motion)
  - Animation type (micro-interactions vs. page transitions)
  - Performance constraints (mobile vs. desktop)
}

---

## 🎯 Design Adaptation Strategies

### When to Adjust Design References

**Brand Alignment**:
- Modify colors to match brand identity and guidelines
- Adjust typography to reflect brand personality
- Tune spacing and radius to align with brand aesthetic

**Accessibility Requirements**:
- Increase color contrast ratios for WCAG compliance
- Adjust font sizes and spacing for readability
- Modify animation durations for reduced-motion preferences

**Platform Optimization**:
- Adapt spacing for mobile touch targets (min 44x44px)
- Adjust shadows and radius for platform conventions
- Optimize animation performance for target devices

**Context-Specific Needs**:
- Dark mode: Adjust colors, shadows, and contrasts
- High-density displays: Fine-tune spacing and sizing
- Responsive design: Scale tokens across breakpoints

### How to Apply Adjustments

1. **Identify Need**: Determine which tokens need adjustment based on your specific requirements
2. **Maintain Relationships**: Preserve proportional relationships between related tokens
3. **Test Thoroughly**: Validate changes across components and use cases
4. **Document Changes**: Track modifications and rationale for team alignment
5. **Iterate**: Refine based on user feedback and testing results

---

## Progressive Loading

### Level 0: Design Tokens (~5K tokens)

Essential design token system for consistent styling.

**Files**:
- [Design Tokens](../../../.workflow/reference_style/{package_name}/design-tokens.json) - Colors, typography, spacing, shadows, borders

**Use when**: Quick token reference, applying consistent styles, color/typography queries

---

### Level 1: Universal Layout Templates (~12K tokens)

**Project-independent** component layout patterns for reusable UI elements.

**Files**:
- Level 0 files
- [Layout Templates](../../../.workflow/reference_style/{package_name}/layout-templates.json) - Component structures with HTML/CSS patterns

**⚠️ Reference Strategy**:
- **Only reference components with `component_type: "universal"`** - these are reusable, project-independent patterns
- **Ignore components with `component_type: "specialized"`** - these are project-specific implementations
- Universal components include: buttons, inputs, forms, cards, navigation, modals, etc.
- Use universal patterns as **reference templates** to adapt for your specific project needs

**Use when**: Building components, understanding component architecture, implementing layouts

---

### Level 2: Complete System (~20K tokens)

Full design system with animations and interactive preview.

**Files**:
- All Level 1 files
- [Animation Tokens](../../../.workflow/reference_style/{package_name}/animation-tokens.json) - Animation durations, easing, transitions _(if available)_
- [Preview HTML](../../../.workflow/reference_style/{package_name}/preview.html) - Interactive showcase (reference only)
- [Preview CSS](../../../.workflow/reference_style/{package_name}/preview.css) - Showcase styling (reference only)

**Use when**: Comprehensive analysis, animation development, complete design system understanding

---

## Interactive Preview

**Location**: `.workflow/reference_style/{package_name}/preview.html`

**View in Browser**:
```bash
cd .workflow/reference_style/{package_name}
python -m http.server 8080
# Open http://localhost:8080/preview.html
```

**Features**:
- Color palette swatches with values
- Typography scale and combinations
- All components with variants and states
- Spacing, radius, shadow visual examples
- Interactive state demonstrations
- Usage code snippets

---

## Usage Guidelines

### Loading Levels

**Level 0** (5K): Design tokens only
```
Load Level 0 for design token reference
```

**Level 1** (12K): Tokens + layout templates
```
Load Level 1 for layout templates and design tokens
```

**Level 2** (20K): Complete system with animations and preview
```
Load Level 2 for complete design system with preview reference
```

### Common Use Cases

**Implementing UI Components**:
- Load Level 1 for universal layout templates
- **Only reference components with `component_type: "universal"`** in layout-templates.json
- Apply design tokens from design-tokens.json
- Adapt patterns to your project's specific requirements

**Ensuring Style Consistency**:
- Load Level 0 for design tokens
- Use design-tokens.json for colors, typography, spacing
- Check preview.html for visual reference (universal components only)

**Analyzing Component Patterns**:
- Load Level 2 for complete analysis
- Review layout-templates.json for component architecture
- **Filter for `component_type: "universal"` to exclude project-specific implementations**
- Check preview.html for implementation examples

**Animation Development**:
- Load Level 2 for animation tokens (if available)
- Reference animation-tokens.json for durations and easing
- Apply consistent timing and transitions

**⚠️ Critical Usage Rule**:
This is a **project-independent style reference system**. When working with layout-templates.json:
- **USE**: Components marked `component_type: "universal"` as reusable reference patterns
- **IGNORE**: Components marked `component_type: "specialized"` (project-specific implementations)
- **ADAPT**: All patterns should be customized for your specific project needs

---

## Package Structure

```
.workflow/reference_style/{package_name}/
├── layout-templates.json    # Layout templates from codebase
├── design-tokens.json       # Design token system
├── animation-tokens.json    # Animation tokens (optional)
├── preview.html             # Interactive showcase
└── preview.css              # Showcase styling
```

---

## Regeneration

To update this SKILL memory after package changes:

```bash
/memory:style-skill-memory {package_name} --regenerate
```

---

## Related Commands

**Generate Package**:
```bash
/workflow:ui-design:codify-style --source ./src --package-name {package_name}
```

**Update Package**:
Re-run codify-style with same package name to update extraction.
```

**Template Processing**:

1. **Load template** (execute first): `bash(cat ~/.claude/workflows/cli-templates/memory/style-skill-memory/skill-md-template.md)`
2. **Replace variables**: Substitute all `{variable}` placeholders with actual values from Phase 2
3. **Generate dynamic sections**:
   - **Prerequisites & Tooling**: Generate based on `DESIGN_ANALYSIS` technical features (oklch, calc, dark mode)
   - **Design Principles**: Generate based on `DESIGN_ANALYSIS` characteristics
   - **Complete Implementation Example**: Include React component example with token adaptation
   - **Design Token Values**: Iterate `DESIGN_TOKENS_DATA` and `ANIMATION_TOKENS_DATA` and display all key-value pairs with DEFAULT annotations
4. **Write to file**: Use Write tool to save to `.claude/skills/style-{package_name}/SKILL.md`

**Variable Replacement Map**:
- `{package_name}` → PACKAGE_NAME
- `{intelligent_description}` → Generated description from Step 2
- `{component_count}` → COMPONENT_COUNT
- `{universal_count}` → UNIVERSAL_COUNT
- `{specialized_count}` → SPECIALIZED_COUNT
- `{universal_components_list}` → UNIVERSAL_COMPONENTS (comma-separated)
- `{has_animations}` → HAS_ANIMATIONS

**Dynamic Content Generation**:

See the template file for the complete structure. Key dynamic sections:

1. **Prerequisites & Tooling** (based on DESIGN_ANALYSIS technical features):
   - IF uses_oklch → Include PostCSS plugin requirement (`postcss-oklab-function`)
   - IF uses_calc → Include preprocessor requirement for calc() expressions
   - IF has_dark_mode → Include dark mode implementation mechanism (class or media query)
   - ALWAYS include browser support, jq installation, and local server setup

2. **Design Principles** (based on DESIGN_ANALYSIS):
   - IF has_colors → Include "Color System" principle with semantic pattern
   - IF spacing_pattern detected → Include "Spatial Rhythm" with unified scale description (actual token values)
   - IF has_typography_hierarchy → Include "Typographic System" with scale examples
   - IF has_radius → Include "Shape Language" with style characteristic
   - IF has_shadows → Include "Depth & Elevation" with elevation pattern
   - IF has_animations → Include "Motion & Timing" with duration range
   - ALWAYS include "Accessibility First" principle

3. **Design Token Values** (iterate from read data):
   - Colors: Iterate `DESIGN_TOKENS_DATA.colors`
   - Typography: Iterate `DESIGN_TOKENS_DATA.typography`
   - Spacing: Iterate `DESIGN_TOKENS_DATA.spacing`
   - Border Radius: Iterate `DESIGN_TOKENS_DATA.border_radius` with calc() explanations
   - Shadows: Iterate `DESIGN_TOKENS_DATA.shadows` with DEFAULT token annotations
   - Animations (if available): Iterate `ANIMATION_TOKENS_DATA.duration` and `ANIMATION_TOKENS_DATA.easing`

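A minimal sketch of the variable-substitution step (plain `sed` over the cat'd template; writing via shell redirection rather than the Write tool is an illustrative shortcut):

```bash
TEMPLATE=~/.claude/workflows/cli-templates/memory/style-skill-memory/skill-md-template.md
OUT=".claude/skills/style-${package_name}/SKILL.md"

# Replace each {placeholder} with its Phase 2 value
sed -e "s|{package_name}|${package_name}|g" \
    -e "s|{component_count}|${COMPONENT_COUNT}|g" \
    -e "s|{universal_count}|${UNIVERSAL_COUNT}|g" \
    -e "s|{specialized_count}|${SPECIALIZED_COUNT}|g" \
    -e "s|{has_animations}|${HAS_ANIMATIONS}|g" \
    "$TEMPLATE" > "$OUT"
```
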
**Step 4: Verify SKILL.md Created**

```bash
bash(test -f .claude/skills/style-${package_name}/SKILL.md && echo "success" || echo "failed")
```

**TodoWrite Update**: Mark all todos as completed:

```json
[
  {"content": "Validate style reference package", "status": "completed", "activeForm": "Validating package"},
  {"content": "Read package data and extract design references", "status": "completed", "activeForm": "Reading package data"},
  {"content": "Generate SKILL.md with progressive loading", "status": "completed", "activeForm": "Generating SKILL.md"}
]
```

**Final Action**: Report completion summary to user

---

## Completion Message

Display the extracted primary design references to the user:

```
✅ SKILL memory generated successfully!

Package: {package_name}
SKILL Location: .claude/skills/style-{package_name}/SKILL.md

📦 Package Details:
- Layout Templates: {component_count} total
  - Universal (reusable): {universal_count}
  - Specialized (project-specific): {specialized_count}
- Universal Component Types: {show first 5 UNIVERSAL_COMPONENTS, then "+ X more"}
- Files: {file_count}
- Animation Tokens: {has_animations ? "✓ Available" : "Not available"}

🎨 Primary Design References Extracted:
{IF PRIMARY_COLORS exists:
Colors:
  {show first 3 PRIMARY_COLORS with key: value}
  {if more than 3: + X more colors}
}

{IF TYPOGRAPHY_FONTS exists:
Typography:
  {show all TYPOGRAPHY_FONTS}
}

{IF SPACING_SCALE exists:
Spacing Scale:
  {show first 3 SPACING_SCALE items}
  {if more than 3: + X more spacing tokens}
}

{IF BORDER_RADIUS exists:
Border Radius:
  {show all BORDER_RADIUS}
}

{IF HAS_ANIMATIONS:
Animation:
  Durations: {count ANIMATION_DURATIONS} tokens
  Easing: {count EASING_FUNCTIONS} functions
}

⚡ Progressive Loading Levels:
- Level 0: Design Tokens (~5K tokens)
- Level 1: Tokens + Layout Templates (~12K tokens)
- Level 2: Complete System (~20K tokens)

💡 Usage:
Load design system context when working with:
- UI component implementation
- Layout pattern analysis
- Design token application
- Style consistency validation

⚠️ IMPORTANT - Project Independence:
This is a **project-independent style reference system**:
- Only use universal components (component_type: "universal") as reference patterns
- Ignore specialized components (component_type: "specialized") - they are project-specific
- The extracted design references are REFERENCE VALUES, not fixed constraints
- Dynamically adjust colors, spacing, typography, and other tokens based on:
  - Brand requirements and accessibility needs
  - Platform-specific conventions and optimizations
  - Context (light/dark mode, responsive breakpoints)
  - User feedback and testing results

See SKILL.md for detailed adjustment guidelines and component filtering instructions.

🎯 Preview:
Open interactive showcase:
file://{absolute_path}/.workflow/reference_style/{package_name}/preview.html

📋 Next Steps:
1. Load appropriate level based on your task context
2. Review Primary Design References section for key design tokens
3. Apply design tokens with dynamic adjustments as needed
4. Reference layout-templates.json for component structures
5. Use Design Adaptation Strategies when modifying tokens

💡 Usage: /memory:load-skill-memory style-{package_name} "your task description"
```

---

## Error Handling

### Common Errors

| Error | Cause | Resolution |
|-------|-------|------------|
| Package not found | Invalid package name or package doesn't exist | Run codify-style first to create package |
| SKILL already exists | SKILL.md already generated | Use --regenerate to force regeneration |
| Missing layout-templates.json | Incomplete package | Verify package integrity, re-run codify-style |
| Invalid JSON format | Corrupted package files | Regenerate package with codify-style |

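The "SKILL already exists" case is the one worth automating. A minimal sketch of the guard, assuming the `--regenerate` flag arrives as the second positional argument (that argument position is an illustrative assumption):

```bash
SKILL_DIR=".claude/skills/style-${package_name}"

if [[ -f "$SKILL_DIR/SKILL.md" && "$2" != "--regenerate" ]]; then
  echo "SKILL already exists at $SKILL_DIR - use --regenerate to overwrite"
  exit 0
fi
mkdir -p "$SKILL_DIR"   # safe for both fresh and regenerated runs
```
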
---

## Best Practices

1. **Check Before Generate**: Verify package exists before attempting SKILL generation
2. **Respect Existing SKILL**: Don't overwrite unless --regenerate flag provided
3. **Extract Primary References**: Always extract and display key design values (colors, typography, spacing, border radius, shadows, animations)
4. **Include Adjustment Guidance**: Provide clear guidelines on when and how to dynamically adjust design tokens
5. **Load Templates via cat**: Use `cat ~/.claude/workflows/cli-templates/memory/style-skill-memory/{template}` to load templates
6. **Variable Substitution**: Replace all `{variable}` placeholders with actual values
7. **Technical Feature Detection**: Analyze tokens for modern features (oklch, calc, dark mode) and generate an appropriate Prerequisites section
8. **Dynamic Content Generation**: Generate sections based on DESIGN_ANALYSIS characteristics
9. **Unified Spacing Scale**: Use actual token values as the primary scale reference; avoid contradictory pattern descriptions
10. **Direct Iteration**: Iterate data structures (DESIGN_TOKENS_DATA, etc.) for token values
11. **Annotate Special Tokens**: Add comments for DEFAULT tokens and calc() expressions
12. **Embed jq Commands**: Include bash/jq commands in SKILL.md for dynamic loading
13. **Progressive Loading**: Always include all 3 levels (0-2) with clear token estimates and specific jq commands
14. **Complete Examples**: Include end-to-end implementation examples (React components)
15. **Intelligent Description**: Extract component count and key features from metadata
16. **Emphasize Flexibility**: Strongly warn against rigid copying - values are references for creative adaptation

### SKILL Description Format

**Template**:
```
{package_name} project-independent design system with {universal_count} universal layout templates and interactive preview (located at .workflow/reference_style/{package_name}). Load when working with reusable UI components, design tokens, layout patterns, or implementing visual consistency. Excludes {specialized_count} project-specific components.
```

**Required Elements**:
- Package name
- Universal layout template count (emphasize reusability)
- Project independence statement
- Specialized component exclusion notice
- Location (full path)
- Trigger keywords (reusable UI components, design tokens, layout patterns, visual consistency)
- Action verbs (working with, analyzing, implementing)

### Primary Design References Extraction

**Required Data Extraction** (from design-tokens.json):
- Colors: Primary, secondary, accent colors (top 3-5)
- Typography: Font families for headings and body text
- Spacing Scale: Base spacing values (xs, sm, md, lg, xl)
- Border Radius: All radius tokens
- Shadows: Shadow definitions (top 3 elevation levels)

**Component Classification Extraction** (from layout-templates.json):
- Universal Count: Number of components with `component_type: "universal"`
- Specialized Count: Number of components with `component_type: "specialized"`
- Universal Component Names: List of universal component names (first 10)

**Optional Data Extraction** (from animation-tokens.json if available):
- Animation Durations: All duration tokens
- Easing Functions: Top 3 easing functions

**Extraction Format**:
Use `jq` to extract tokens from JSON files. Each token should include key and value.
For component classification, filter by the `component_type` field.

### Dynamic Adjustment Guidelines

**Include in SKILL.md**:
1. **Usage Guidelines per Category**: Specific guidance for each token category
2. **Adjustment Strategies**: When to adjust design references
3. **Practical Examples**: Context-specific adaptation scenarios
4. **Best Practices**: How to maintain design system coherence while adjusting

### Progressive Loading Structure

**Level 0** (~5K tokens):
- design-tokens.json

**Level 1** (~12K tokens):
- Level 0 files
- layout-templates.json

**Level 2** (~20K tokens):
- Level 1 files
- animation-tokens.json (if exists)
- preview.html
- preview.css

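Since SKILL.md embeds jq commands for dynamic loading, a level load can be expressed directly. A minimal sketch (the exact group selection is illustrative):

```bash
PKG=".workflow/reference_style/${package_name}"

# Level 0: design tokens only
jq '{colors, typography, spacing, border_radius, shadows}' "$PKG/design-tokens.json"

# Level 1 adds universal layout templates (specialized ones are filtered out)
jq '[.layout_templates | to_entries[] | select(.value.component_type == "universal")] | from_entries' \
  "$PKG/layout-templates.json"
```
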
---

## Benefits

- **Project Independence**: Clear separation between universal (reusable) and specialized (project-specific) components
- **Component Filtering**: Automatic classification helps identify which patterns are truly reusable
- **Fast Context Loading**: Progressive levels for efficient token usage
- **Primary Design References**: Extracted key design values (colors, typography, spacing, etc.) displayed prominently
- **Dynamic Adjustment Guidance**: Clear instructions on when and how to adjust design tokens
- **Intelligent Triggering**: Keywords optimize SKILL activation
- **Complete Reference**: All package files accessible through SKILL
- **Easy Regeneration**: Simple --regenerate flag for updates
- **Clear Structure**: Organized levels by use case with component type filtering
- **Practical Usage Guidelines**: Context-specific adjustment strategies and component selection criteria

---

## Architecture

```
style-skill-memory
├─ Phase 1: Validate
│  ├─ Parse package name from argument or auto-detect
│  ├─ Check package exists in .workflow/reference_style/ (PACKAGE_DIR)
│  └─ Check if SKILL already exists (SKILL_DIR; skip if exists and no --regenerate)
│
├─ Phase 2: Read Package Data & Extract Primary References
│  ├─ Read design-tokens.json → DESIGN_TOKENS_DATA
│  ├─ Read layout-templates.json → LAYOUT_TEMPLATES_DATA
│  ├─ Read animation-tokens.json → ANIMATION_TOKENS_DATA (if exists)
│  ├─ Count components from layout-templates.json
│  ├─ Extract component types list
│  ├─ Extract primary colors from design-tokens.json (top 3-5)
│  ├─ Extract typography (font families)
│  ├─ Extract spacing scale (base values)
│  ├─ Extract border radius tokens
│  ├─ Extract shadow definitions (top 3)
│  ├─ Extract animation tokens (if available)
│  ├─ Analyze design system → DESIGN_ANALYSIS (characteristics)
│  └─ Count total files in package
│
└─ Phase 3: Generate SKILL.md
   ├─ Create SKILL directory
   ├─ Generate intelligent description with keywords
   ├─ Load SKILL.md template (cat command)
   ├─ Replace variables and generate dynamic content
   ├─ Write SKILL.md with complete structure:
   │  ├─ Package Overview
   │  ├─ Primary Design References
   │  │  ├─ Colors with usage guidelines
   │  │  ├─ Typography with usage guidelines
   │  │  ├─ Spacing with usage guidelines
   │  │  ├─ Border Radius with usage guidelines
   │  │  ├─ Shadows with usage guidelines
   │  │  └─ Animation & Timing (if available)
   │  ├─ Design Adaptation Strategies
   │  │  ├─ When to adjust design references
   │  │  └─ How to apply adjustments
   │  ├─ Progressive Loading (3 levels)
   │  ├─ Interactive Preview
   │  ├─ Usage Guidelines
   │  ├─ Package Structure
   │  ├─ Regeneration
   │  └─ Related Commands
   ├─ Verify SKILL.md created successfully
   ├─ Load completion message template (cat command)
   └─ Display completion message with extracted design references

Data Flow:
design-tokens.json    → jq extraction → PRIMARY_COLORS, TYPOGRAPHY_FONTS,
                                         SPACING_SCALE, BORDER_RADIUS, SHADOWS
animation-tokens.json → jq extraction → ANIMATION_DURATIONS, EASING_FUNCTIONS
layout-templates.json → jq extraction → COMPONENT_COUNT, UNIVERSAL_COUNT,
                                         SPECIALIZED_COUNT, UNIVERSAL_COMPONENTS
                      → component_type filtering → Universal vs Specialized classification

Extracted data → SKILL.md generation → Primary Design References section
                                     → Component Classification section
                                     → Dynamic Adjustment Guidelines
                                     → Project Independence warnings
                                     → Completion message display
```

---
name: tech-research
description: "3-phase orchestrator: extract tech stack from session/name → delegate to agent for Exa research and module generation → generate SKILL.md index (skips phase 2 if exists)"
argument-hint: "[session-id | tech-stack-name] [--regenerate] [--tool <gemini|qwen>]"
allowed-tools: SlashCommand(*), TodoWrite(*), Bash(*), Read(*), Write(*), Task(*)
---

# Tech Stack Research SKILL Generator

## Overview

**Pure Orchestrator with Agent Delegation**: Prepares context paths and delegates ALL work to agent. Agent produces files directly.

**Auto-Continue Workflow**: Runs fully autonomously once triggered. Each phase completes and automatically triggers the next phase.

**Execution Paths**:
- **Full Path**: All 3 phases (no existing SKILL OR `--regenerate` specified)
- **Skip Path**: Phase 1 → Phase 3 (existing SKILL found AND no `--regenerate` flag)
- **Phase 3 Always Executes**: SKILL index is always generated or updated

**Agent Responsibility**:
- Agent does ALL the work: context reading, Exa research, content synthesis, file writing
- Orchestrator only provides context paths and waits for completion

## Core Rules

1. **Start Immediately**: First action is TodoWrite initialization, second action is Phase 1 execution
2. **Context Path Delegation**: Pass session directory or tech stack name to agent, let agent do discovery
3. **Agent Produces Files**: Agent directly writes all module files, orchestrator does NOT parse agent output
4. **Auto-Continue**: After completing each phase, update TodoWrite and immediately execute next phase
5. **No User Prompts**: Never ask user questions or wait for input between phases
6. **Track Progress**: Update TodoWrite after EVERY phase completion before starting next phase
7. **Lightweight Index**: Phase 3 only generates SKILL.md index by reading existing files

---

## 3-Phase Execution

### Phase 1: Prepare Context Paths

**Goal**: Detect input mode, prepare context paths for agent, check existing SKILL

**Input Mode Detection**:
```bash
# Get input parameter
input="$1"

# Detect mode
if [[ "$input" == WFS-* ]]; then
  MODE="session"
  SESSION_ID="$input"
  CONTEXT_PATH=".workflow/${SESSION_ID}"
else
  MODE="direct"
  TECH_STACK_NAME="$input"
  CONTEXT_PATH="$input"  # Pass tech stack name as context
fi
```

**Check Existing SKILL**:
```bash
# For session mode, peek at session to get tech stack name
if [[ "$MODE" == "session" ]]; then
  bash(test -f ".workflow/${SESSION_ID}/workflow-session.json")
  Read(.workflow/${SESSION_ID}/workflow-session.json)
  # Extract tech_stack_name (minimal extraction)
fi

# Normalize and check
normalized_name=$(echo "$TECH_STACK_NAME" | tr '[:upper:]' '[:lower:]' | tr ' ' '-')
bash(test -d ".claude/skills/${normalized_name}" && echo "exists" || echo "not_exists")
bash(find ".claude/skills/${normalized_name}" -name "*.md" 2>/dev/null | wc -l || echo 0)
```

**Skip Decision**:
```javascript
if (existing_files > 0 && !regenerate_flag) {
  SKIP_GENERATION = true
  message = "Tech stack SKILL already exists, skipping Phase 2. Use --regenerate to force regeneration."
} else if (regenerate_flag) {
  bash(rm -rf ".claude/skills/${normalized_name}")
  SKIP_GENERATION = false
  message = "Regenerating tech stack SKILL from scratch."
} else {
  SKIP_GENERATION = false
  message = "No existing SKILL found, generating new tech stack documentation."
}
```

**Output Variables**:
- `MODE`: `session` or `direct`
- `SESSION_ID`: Session ID (if session mode)
- `CONTEXT_PATH`: Path to session directory OR tech stack name
- `TECH_STACK_NAME`: Extracted or provided tech stack name
- `SKIP_GENERATION`: Boolean - whether to skip Phase 2

**TodoWrite**:
- If skipping: Mark phase 1 completed, phase 2 completed, phase 3 in_progress
- If not skipping: Mark phase 1 completed, phase 2 in_progress

---

### Phase 2: Agent Produces All Files

**Skip Condition**: Skipped if `SKIP_GENERATION = true`

**Goal**: Delegate EVERYTHING to agent - context reading, Exa research, content synthesis, and file writing

**Agent Task Specification**:

```
Task(
  subagent_type: "general-purpose",
  description: "Generate tech stack SKILL: {CONTEXT_PATH}",
  prompt: "
  Generate a complete tech stack SKILL package with Exa research.

  **Context Provided**:
  - Mode: {MODE}
  - Context Path: {CONTEXT_PATH}

  **Templates Available**:
  - Module Format: ~/.claude/workflows/cli-templates/prompts/tech/tech-module-format.txt
  - SKILL Index: ~/.claude/workflows/cli-templates/prompts/tech/tech-skill-index.txt

  **Your Responsibilities**:

  1. **Extract Tech Stack Information**:

     IF MODE == 'session':
     - Read `.workflow/{SESSION_ID}/workflow-session.json`
     - Read `.workflow/{SESSION_ID}/.process/context-package.json`
     - Extract tech_stack: {language, frameworks, libraries}
     - Build tech stack name: \"{language}-{framework1}-{framework2}\"
     - Example: \"typescript-react-nextjs\"

     IF MODE == 'direct':
     - Tech stack name = CONTEXT_PATH
     - Parse composite: split by '-' delimiter
     - Example: \"typescript-react-nextjs\" → [\"typescript\", \"react\", \"nextjs\"]

  2. **Execute Exa Research** (4-6 parallel queries):

     Base Queries (always execute):
     - mcp__exa__get_code_context_exa(query: \"{tech} core principles best practices 2025\", tokensNum: 8000)
     - mcp__exa__get_code_context_exa(query: \"{tech} common patterns architecture examples\", tokensNum: 7000)
     - mcp__exa__web_search_exa(query: \"{tech} configuration setup tooling 2025\", numResults: 5)
     - mcp__exa__get_code_context_exa(query: \"{tech} testing strategies\", tokensNum: 5000)

     Component Queries (if composite):
     - For each additional component:
       mcp__exa__get_code_context_exa(query: \"{main_tech} {component} integration\", tokensNum: 5000)

  3. **Read Module Format Template**:

     Read template for structure guidance:
     Read(~/.claude/workflows/cli-templates/prompts/tech/tech-module-format.txt)

  4. **Synthesize Content into 6 Modules**:

     Follow template structure from tech-module-format.txt:
     - **principles.md** - Core concepts, philosophies (~3K tokens)
     - **patterns.md** - Implementation patterns with code examples (~5K tokens)
     - **practices.md** - Best practices, anti-patterns, pitfalls (~4K tokens)
     - **testing.md** - Testing strategies, frameworks (~3K tokens)
     - **config.md** - Setup, configuration, tooling (~3K tokens)
     - **frameworks.md** - Framework integration (only if composite, ~4K tokens)

     Each module follows template format:
     - Frontmatter (YAML)
     - Main sections with clear headings
     - Code examples from Exa research
     - Best practices sections
     - References to Exa sources

  5. **Write Files Directly**:

     // Create directory
     bash(mkdir -p \".claude/skills/{tech_stack_name}\")

     // Write each module file using Write tool
     Write({ file_path: \".claude/skills/{tech_stack_name}/principles.md\", content: ... })
     Write({ file_path: \".claude/skills/{tech_stack_name}/patterns.md\", content: ... })
     Write({ file_path: \".claude/skills/{tech_stack_name}/practices.md\", content: ... })
     Write({ file_path: \".claude/skills/{tech_stack_name}/testing.md\", content: ... })
     Write({ file_path: \".claude/skills/{tech_stack_name}/config.md\", content: ... })
     // Write frameworks.md only if composite

     // Write metadata.json
     Write({
       file_path: \".claude/skills/{tech_stack_name}/metadata.json\",
       content: JSON.stringify({
         tech_stack_name,
         components,
         is_composite,
         generated_at: timestamp,
         source: \"exa-research\",
         research_summary: { total_queries, total_sources }
       })
     })

  6. **Report Completion**:

     Provide summary:
     - Tech stack name
     - Files created (count)
     - Exa queries executed
     - Sources consulted

  **CRITICAL**:
  - MUST read the external module format template (step 3) before generating content; the SKILL index template is read by the orchestrator in Phase 3
  - You have FULL autonomy - read files, execute Exa, synthesize content, write files
  - Do NOT return JSON or structured data - produce actual .md files
  - Handle errors gracefully (Exa failures, missing files, template read failures)
  - If tech stack cannot be determined, ask orchestrator to clarify
  "
)
```

**Completion Criteria**:
- Agent task executed successfully
- 5-6 modular files written to `.claude/skills/{tech_stack_name}/`
- metadata.json written
- Agent reports completion

**TodoWrite**: Mark phase 2 completed, phase 3 in_progress

---

### Phase 3: Generate SKILL.md Index

**Note**: This phase **ALWAYS executes** - generates or updates the SKILL index.

**Goal**: Read generated module files and create SKILL.md index with loading recommendations

**Steps**:

1. **Verify Generated Files**:
```bash
bash(find ".claude/skills/${TECH_STACK_NAME}" -name "*.md" -type f | sort)
```

2. **Read metadata.json**:
```javascript
Read(.claude/skills/${TECH_STACK_NAME}/metadata.json)
// Extract: tech_stack_name, components, is_composite, research_summary
```

3. **Read Module Headers** (optional, first 20 lines):
```javascript
Read(.claude/skills/${TECH_STACK_NAME}/principles.md, limit: 20)
// Repeat for other modules
```

4. **Read SKILL Index Template**:
```javascript
Read(~/.claude/workflows/cli-templates/prompts/tech/tech-skill-index.txt)
```

5. **Generate SKILL.md Index**:

Follow template from tech-skill-index.txt with variable substitutions:
- `{TECH_STACK_NAME}`: From metadata.json
- `{MAIN_TECH}`: Primary technology
- `{ISO_TIMESTAMP}`: Current timestamp
- `{QUERY_COUNT}`: From research_summary
- `{SOURCE_COUNT}`: From research_summary
- Conditional sections for composite tech stacks

Template provides structure for:
- Frontmatter with metadata
- Overview and tech stack description
- Module organization (Core/Practical/Config sections)
- Loading recommendations (Quick/Implementation/Complete)
- Usage guidelines and auto-trigger keywords
- Research metadata and version history

6. **Write SKILL.md**:
```javascript
Write({
  file_path: `.claude/skills/${TECH_STACK_NAME}/SKILL.md`,
  content: generatedIndexMarkdown
})
```

**Completion Criteria**:
- SKILL.md index written
- All module files verified
- Loading recommendations included

**TodoWrite**: Mark phase 3 completed

**Final Report**:
```
Tech Stack SKILL Package Complete

Tech Stack: {TECH_STACK_NAME}
Location: .claude/skills/{TECH_STACK_NAME}/

Files: SKILL.md + 5-6 modules + metadata.json
Exa Research: {queries} queries, {sources} sources

Usage: Skill(command: "{TECH_STACK_NAME}")
```

---

## Implementation Details

### TodoWrite Patterns

**Initialization** (Before Phase 1):
```javascript
TodoWrite({todos: [
  {"content": "Prepare context paths", "status": "in_progress", "activeForm": "Preparing context paths"},
  {"content": "Agent produces all module files", "status": "pending", "activeForm": "Agent producing files"},
  {"content": "Generate SKILL.md index", "status": "pending", "activeForm": "Generating SKILL index"}
]})
```

**Full Path** (SKIP_GENERATION = false):
```javascript
// After Phase 1
TodoWrite({todos: [
  {"content": "Prepare context paths", "status": "completed", ...},
  {"content": "Agent produces all module files", "status": "in_progress", ...},
  {"content": "Generate SKILL.md index", "status": "pending", ...}
]})

// After Phase 2
TodoWrite({todos: [
  {"content": "Prepare context paths", "status": "completed", ...},
  {"content": "Agent produces all module files", "status": "completed", ...},
  {"content": "Generate SKILL.md index", "status": "in_progress", ...}
]})

// After Phase 3
TodoWrite({todos: [
  {"content": "Prepare context paths", "status": "completed", ...},
  {"content": "Agent produces all module files", "status": "completed", ...},
  {"content": "Generate SKILL.md index", "status": "completed", ...}
]})
```

**Skip Path** (SKIP_GENERATION = true):
```javascript
// After Phase 1 (skip Phase 2)
TodoWrite({todos: [
  {"content": "Prepare context paths", "status": "completed", ...},
  {"content": "Agent produces all module files", "status": "completed", ...}, // Skipped
  {"content": "Generate SKILL.md index", "status": "in_progress", ...}
]})
```

### Execution Flow

**Full Path**:
```
User → TodoWrite Init → Phase 1 (prepare) → Phase 2 (agent writes files) → Phase 3 (write index) → Report
```

**Skip Path**:
```
User → TodoWrite Init → Phase 1 (detect existing) → Phase 3 (update index) → Report
```

### Error Handling

**Phase 1 Errors**:
- Invalid session ID: Report error, verify session exists
- Missing context-package: Warn, fall back to direct mode
- No tech stack detected: Ask user to specify tech stack name

**Phase 2 Errors (Agent)**:
- Agent task fails: Retry once, report if fails again
- Exa API failures: Agent handles internally with retries
- Incomplete results: Warn user, proceed with partial data if minimum sections available

**Phase 3 Errors**:
- Write failures: Report which files failed
- Missing files: Note in SKILL.md, suggest regeneration

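A minimal sketch of the "retry once" policy for the Phase 2 agent task (the `run_agent_task` helper is an illustrative stand-in for the Task(...) invocation, not a real function of this workflow):

```bash
# Retry the agent task once before giving up
if ! run_agent_task; then
  echo "Agent task failed, retrying once..."
  run_agent_task || { echo "Agent task failed twice - aborting Phase 2"; exit 1; }
fi
```
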
---

## Parameters

```bash
/memory:tech-research [session-id | "tech-stack-name"] [--regenerate] [--tool <gemini|qwen>]
```

**Arguments**:
- **session-id | tech-stack-name**: Input source (auto-detected by WFS- prefix)
  - Session mode: `WFS-user-auth-v2` - Extract tech stack from workflow
  - Direct mode: `"typescript"`, `"typescript-react-nextjs"` - User specifies
- **--regenerate**: Force regenerate existing SKILL (deletes and recreates)
- **--tool**: Reserved for future CLI integration (default: gemini)

---

## Examples

**Generated File Structure** (for all examples):
```
.claude/skills/{tech-stack}/
├── SKILL.md          # Index (Phase 3)
├── principles.md     # Agent (Phase 2)
├── patterns.md       # Agent
├── practices.md      # Agent
├── testing.md        # Agent
├── config.md         # Agent
├── frameworks.md     # Agent (if composite)
└── metadata.json     # Agent
```

### Direct Mode - Single Stack

```bash
/memory:tech-research "typescript"
```

**Workflow**:
1. Phase 1: Detects direct mode, checks existing SKILL
2. Phase 2: Agent executes 4 Exa queries, writes 5 modules
3. Phase 3: Generates SKILL.md index

### Direct Mode - Composite Stack

```bash
/memory:tech-research "typescript-react-nextjs"
```

**Workflow**:
1. Phase 1: Decomposes into ["typescript", "react", "nextjs"]
2. Phase 2: Agent executes 6 Exa queries (4 base + 2 components), writes 6 modules (adds frameworks.md)
3. Phase 3: Generates SKILL.md index with framework integration

### Session Mode - Extract from Workflow

```bash
/memory:tech-research WFS-user-auth-20251104
```

**Workflow**:
1. Phase 1: Reads session, extracts tech stack: `python-fastapi-sqlalchemy`
2. Phase 2: Agent researches Python + FastAPI + SQLAlchemy, writes 6 modules
3. Phase 3: Generates SKILL.md index

### Regenerate Existing

```bash
/memory:tech-research "react" --regenerate
```

**Workflow**:
1. Phase 1: Deletes existing SKILL due to --regenerate
2. Phase 2: Agent executes fresh Exa research (latest 2025 practices)
3. Phase 3: Generates updated SKILL.md

### Skip Path - Fast Update

```bash
/memory:tech-research "python"
```

**Scenario**: SKILL already exists with 7 files

**Workflow**:
1. Phase 1: Detects existing SKILL, sets SKIP_GENERATION = true
2. Phase 2: **SKIPPED**
3. Phase 3: Updates SKILL.md index only (5-10x faster)

---
name: tips
description: Quick note-taking command to capture ideas, snippets, reminders, and insights for later reference
argument-hint: "<note content> [--tag <tag1,tag2>] [--context <context>]"
allowed-tools: mcp__ccw-tools__core_memory(*), Read(*)
examples:
  - '/memory:tips "Remember to use Redis for rate limiting"'
  - '/memory:tips "Auth pattern: JWT with refresh tokens" --tag architecture,auth'
  - '/memory:tips "Bug: memory leak in WebSocket handler after 24h" --context websocket-service'
  - '/memory:tips "Performance: lazy loading reduced bundle by 40%" --tag performance'
---

# Memory Tips Command (/memory:tips)

## 1. Overview

The `memory:tips` command provides **quick note-taking** for capturing:
- Quick ideas and insights
- Code snippets and patterns
- Reminders and follow-ups
- Bug notes and debugging hints
- Performance observations
- Architecture decisions
- Library/tool recommendations

**Core Philosophy**:
- **Speed First**: Minimal friction for capturing thoughts
- **Searchable**: Tagged for easy retrieval
- **Context-Aware**: Optional context linking
- **Lightweight**: No complex session analysis

## 2. Parameters

- `<note content>` (Required): The tip/note content to save
- `--tag <tags>` (Optional): Comma-separated tags for categorization
- `--context <context>` (Optional): Related context (file, module, feature)

**Examples**:
```bash
/memory:tips "Use Zod for runtime validation - better DX than class-validator"
/memory:tips "Redis connection pool: max 10, min 2" --tag config,redis
/memory:tips "Fix needed: race condition in payment processor" --tag bug,payment --context src/payments
```

## 3. Structured Output Format

```markdown
## Tip ID
TIP-YYYYMMDD-HHMMSS

## Timestamp
YYYY-MM-DD HH:MM:SS

## Project Root
[Absolute path to project root, e.g., D:\Claude_dms3]

## Content
[The tip/note content exactly as provided]

## Tags
[Comma-separated tags, or (none)]

## Context
[Optional context linking - file, module, or feature reference]

## Session Link
[WFS-ID if workflow session active, otherwise (none)]

## Auto-Detected Context
[Files/topics from current conversation if relevant]
```

## 4. Field Definitions

| Field | Purpose | Example |
|-------|---------|---------|
| **Tip ID** | Unique identifier with timestamp | TIP-20260128-143052 |
| **Timestamp** | When tip was created | 2026-01-28 14:30:52 |
| **Project Root** | Current project path | D:\Claude_dms3 |
| **Content** | The actual tip/note | "Use Redis for rate limiting" |
| **Tags** | Categorization labels | architecture, auth, performance |
| **Context** | Related code/feature | src/auth/**, payment-module |
| **Session Link** | Link to workflow session | WFS-auth-20260128 |
| **Auto-Detected Context** | Files from conversation | src/api/handler.ts |

## 5. Execution Flow
|
||||
|
||||
### Step 1: Parse Arguments
|
||||
|
||||
```javascript
const parseTipsCommand = (input) => {
  // Extract note content: a quoted string, or everything before the first flag
  const contentMatch = input.match(/^"([^"]+)"|^(.+?)(?=\s+--|$)/);
  const content = contentMatch ? (contentMatch[1] || contentMatch[2]) : '';

  // Extract tags (comma-separated value after --tag)
  const tagsMatch = input.match(/--tag\s+(\S+)/);
  const tags = tagsMatch ? tagsMatch[1].split(',').map(t => t.trim()) : [];

  // Extract context (file, module, or feature reference after --context)
  const contextMatch = input.match(/--context\s+(\S+)/);
  const context = contextMatch ? contextMatch[1] : '';

  return { content, tags, context };
};
```

### Step 2: Gather Context

```javascript
const gatherTipContext = async () => {
  // Get project root
  const projectRoot = process.cwd(); // or detect from environment

  // Get current session if active
  const manifest = await mcp__ccw-tools__session_manager({
    operation: "list",
    location: "active"
  });
  const sessionId = manifest.sessions?.[0]?.id || null;

  // Auto-detect files from recent conversation
  const recentFiles = extractRecentFilesFromConversation(); // Last 5 messages

  return {
    projectRoot,
    sessionId,
    autoDetectedContext: recentFiles
  };
};
```

### Step 3: Generate Structured Text

```javascript
const generateTipText = (parsed, context) => {
  const timestamp = new Date().toISOString().replace('T', ' ').slice(0, 19);
  const tipId = `TIP-${new Date().toISOString().slice(0,10).replace(/-/g, '')}-${new Date().toTimeString().slice(0,8).replace(/:/g, '')}`;

  return `## Tip ID
${tipId}

## Timestamp
${timestamp}

## Project Root
${context.projectRoot}

## Content
${parsed.content}

## Tags
${parsed.tags.length > 0 ? parsed.tags.join(', ') : '(none)'}

## Context
${parsed.context || '(none)'}

## Session Link
${context.sessionId || '(none)'}

## Auto-Detected Context
${context.autoDetectedContext.length > 0
  ? context.autoDetectedContext.map(f => `- ${f}`).join('\n')
  : '(none)'}`;
};
```
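
Putting the three steps together, a minimal sketch of the flow (assuming the helpers above are in scope and we are inside an async context; `extractRecentFilesFromConversation` is a hypothetical helper):

```javascript
// Minimal end-to-end sketch: parse → gather context → generate structured text
const input = '"Auth pattern: JWT with refresh tokens" --tag architecture,auth';
const parsed = parseTipsCommand(input);
// parsed = { content: "Auth pattern: JWT with refresh tokens", tags: ["architecture", "auth"], context: "" }

const context = await gatherTipContext();
const structuredText = generateTipText(parsed, context);
// structuredText is the markdown block defined in Section 3, ready for Step 4
```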

### Step 4: Save to Core Memory

```javascript
mcp__ccw-tools__core_memory({
  operation: "import",
  text: structuredText
})
```

**Response Format**:
```json
{
  "operation": "import",
  "id": "CMEM-YYYYMMDD-HHMMSS",
  "message": "Created memory: CMEM-YYYYMMDD-HHMMSS"
}
```

### Step 5: Confirm to User

```
✓ Tip saved successfully

ID: CMEM-YYYYMMDD-HHMMSS
Tags: architecture, auth
Context: src/auth/**

To retrieve: /memory:search "auth patterns"
Or via MCP: core_memory(operation="search", query="auth")
```

## 6. Tag Categories (Suggested)

**Technical**:
- `architecture` - Design decisions and patterns
- `performance` - Optimization insights
- `security` - Security considerations
- `bug` - Bug notes and fixes
- `config` - Configuration settings
- `api` - API design patterns

**Development**:
- `testing` - Test strategies and patterns
- `debugging` - Debugging techniques
- `refactoring` - Refactoring notes
- `documentation` - Doc improvements

**Domain Specific**:
- `auth` - Authentication/authorization
- `database` - Database patterns
- `frontend` - UI/UX patterns
- `backend` - Backend logic
- `devops` - Infrastructure and deployment

**Organizational**:
- `reminder` - Follow-up items
- `research` - Research findings
- `idea` - Feature ideas
- `review` - Code review notes

## 7. Search Integration

Tips can be retrieved using:

```bash
# Via command (if /memory:search exists)
/memory:search "rate limiting"

# Via MCP tool
mcp__ccw-tools__core_memory({
  operation: "search",
  query: "rate limiting",
  source_type: "core_memory",
  top_k: 10
})

# Via CLI
ccw core-memory search --query "rate limiting" --top-k 10
```

## 8. Quality Checklist

Before saving:
- [ ] Content is clear and actionable
- [ ] Tags are relevant and consistent
- [ ] Context provides enough reference
- [ ] Auto-detected context is accurate
- [ ] Project root is absolute path
- [ ] Timestamp is properly formatted

## 9. Best Practices

### Good Tips Examples

✅ **Specific and Actionable**:
```
"Use connection pooling for Redis: { max: 10, min: 2, acquireTimeoutMillis: 30000 }"
--tag config,redis
```

✅ **With Context**:
```
"Auth middleware must validate both access and refresh tokens"
--tag security,auth --context src/middleware/auth.ts
```

✅ **Problem + Solution**:
```
"Memory leak fixed by unsubscribing event listeners in componentWillUnmount"
--tag bug,react --context src/components/Chat.tsx
```

### Poor Tips Examples

❌ **Too Vague**:
```
"Fix the bug" --tag bug
```

❌ **Too Long** (use /memory:compact instead):
```
"Here's the complete implementation plan for the entire auth system... [3 paragraphs]"
```

❌ **No Context**:
```
"Remember to update this later"
```

## 10. Use Cases

### During Development
```bash
/memory:tips "JWT secret must be 256-bit minimum" --tag security,auth
/memory:tips "Use debounce (300ms) for search input" --tag performance,ux
```

### After Bug Fixes
```bash
/memory:tips "Race condition in payment: lock with Redis SETNX" --tag bug,payment
```

### Code Review Insights
```bash
/memory:tips "Prefer early returns over nested ifs" --tag style,readability
```

### Architecture Decisions
```bash
/memory:tips "Chose PostgreSQL over MongoDB for ACID compliance" --tag architecture,database
```

### Library Recommendations
```bash
/memory:tips "Zod > Yup for TypeScript validation - better type inference" --tag library,typescript
```

## 11. Notes

- **Frequency**: Use liberally - capture all valuable insights
- **Retrieval**: Search by tags, content, or context
- **Lifecycle**: Tips persist across sessions
- **Organization**: Tags enable filtering and categorization
- **Integration**: Can reference tips in later workflows
- **Lightweight**: No complex session analysis required
@@ -36,13 +36,12 @@ Orchestrates project-wide CLAUDE.md updates using batched agent execution with a

- **Use Case**: Deepest directories with unstructured file layouts
- **Behavior**: Generates CLAUDE.md for current directory AND each subdirectory containing files
- **Context**: All files in current directory tree (`@**/*`)
- **Benefits**: Creates foundation documentation for upper layers to reference

#### Single-Layer Strategy (Layers 1-2)
- **Use Case**: Upper layers that aggregate from existing documentation
- **Behavior**: Generates CLAUDE.md only for current directory
- **Context**: Direct children CLAUDE.md files + current directory code files
- **Benefits**: Minimal context consumption, clear layer separation

### Example Flow

@@ -95,14 +94,15 @@ src/ (depth 1) → SINGLE-LAYER STRATEGY

### Phase 1: Discovery & Analysis

```javascript
// Cache git changes
Bash({command: "git add -A 2>/dev/null || true", run_in_background: false});

// Get module structure
Bash({command: "ccw tool exec get_modules_by_depth '{\"format\":\"list\"}'", run_in_background: false});

// OR with --path
Bash({command: "cd <target-path> && ccw tool exec get_modules_by_depth '{\"format\":\"list\"}'", run_in_background: false});
```

**Parse output** `depth:N|path:<PATH>|...` to extract module paths and count.
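
A minimal sketch of that parsing step, assuming the raw stdout is in `output` (field names taken from the `depth:N|path:<PATH>|...` format above):

```javascript
// Hypothetical parser for the pipe-delimited module list
const modules = output.trim().split('\n').map(line => {
  const fields = {};
  for (const pair of line.split('|')) {
    const [key, ...rest] = pair.split(':');
    fields[key] = rest.join(':'); // values may themselves contain ':' (e.g., Windows paths)
  }
  return { depth: Number(fields.depth), path: fields.path };
});
const moduleCount = modules.length;
```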

@@ -172,26 +172,23 @@ Update Plan:

**Strategy**: Parallel execution within layer (max 4 concurrent), no agent overhead.

**CRITICAL**: All Bash commands use `run_in_background: false` for synchronous execution.

```javascript
// Group modules by LAYER (not depth)
let modules_by_layer = group_by_layer(module_list);
let tool_order = construct_tool_order(primary_tool);

// Process by LAYER (3 → 2 → 1), not by depth
for (let layer of [3, 2, 1]) {
  if (modules_by_layer[layer].length === 0) continue;

  let batches = batch_modules(modules_by_layer[layer], 4);

  for (let batch of batches) {
    let parallel_tasks = batch.map(module => {
      return async () => {
        // Auto-determine strategy based on depth
        let strategy = module.depth >= 3 ? "multi-layer" : "single-layer";

        for (let tool of tool_order) {
          Bash({
            command: `cd ${module.path} && ccw tool exec update_module_claude '{"strategy":"${strategy}","path":".","tool":"${tool}"}'`,
            run_in_background: false
          });
          if (bash_result.exit_code === 0) {
            report(`✅ ${module.path} (Layer ${layer}) updated with ${tool}`);
            return true;
          }
        }
@@ -200,7 +197,6 @@ for (let layer of [3, 2, 1]) {
        return false;
      };
    });

    await Promise.all(parallel_tasks.map(task => task()));
  }
}
```

@@ -248,14 +244,17 @@ MODULES:

TOOLS (try in order): {{tool_1}}, {{tool_2}}, {{tool_3}}

EXECUTION SCRIPT: ccw tool exec update_module_claude
- Accepts strategy parameter: multi-layer | single-layer
- Tool execution via direct CLI commands (gemini/qwen/codex)

EXECUTION FLOW (for each module):
1. Tool fallback loop (exit on first success):
   for tool in {{tool_1}} {{tool_2}} {{tool_3}}; do
     Bash({
       command: `cd "{{module_path}}" && ccw tool exec update_module_claude '{"strategy":"{{strategy}}","path":".","tool":"${tool}"}'`,
       run_in_background: false
     })
     exit_code=$?

     if [ $exit_code -eq 0 ]; then
@@ -287,12 +286,12 @@ REPORTING FORMAT:
```
### Phase 4: Safety Verification

```javascript
// Check only CLAUDE.md files modified
Bash({command: 'git diff --cached --name-only | grep -v "CLAUDE.md" || echo "Only CLAUDE.md files modified"', run_in_background: false});

// Display status
Bash({command: "git status --short", run_in_background: false});
```

**Result Summary**:

@@ -39,12 +39,12 @@ Orchestrates context-aware CLAUDE.md updates for changed modules using batched a

## Phase 1: Change Detection & Analysis

```javascript
// Detect changed modules
Bash({command: "ccw tool exec detect_changed_modules '{\"format\":\"list\"}'", run_in_background: false});

// Cache git changes
Bash({command: "git add -A 2>/dev/null || true", run_in_background: false});
```

**Parse output** `depth:N|path:<PATH>|change:<TYPE>` to extract affected modules.

@@ -89,47 +89,36 @@ Related Update Plan:

## Phase 3A: Direct Execution (<15 modules)

**Strategy**: Parallel execution within depth (max 4 concurrent), no agent overhead.

**CRITICAL**: All Bash commands use `run_in_background: false` for synchronous execution.

```javascript
let modules_by_depth = group_by_depth(changed_modules);
let tool_order = construct_tool_order(primary_tool);

for (let depth of sorted_depths.reverse()) { // N → 0
  let batches = batch_modules(modules_by_depth[depth], 4);

  for (let batch of batches) {
    let parallel_tasks = batch.map(module => {
      return async () => {
        for (let tool of tool_order) {
          Bash({
            command: `cd ${module.path} && ccw tool exec update_module_claude '{"strategy":"single-layer","path":".","tool":"${tool}"}'`,
            run_in_background: false
          });
          if (bash_result.exit_code === 0) {
            report(`✅ ${module.path} updated with ${tool}`);
            return true;
          }
        }
        report(`❌ FAILED: ${module.path} failed all tools`);
        return false;
      };
    });

    await Promise.all(parallel_tasks.map(task => task()));
  }
}
```

**Benefits**:
- No agent startup overhead
- Parallel execution within depth (max 4 concurrent)
- Tool fallback still applies per module
- Faster for small changesets (<15 modules)
- Same batching strategy as Phase 3B but without agent layer

---

## Phase 3B: Agent Batch Execution (≥15 modules)
@@ -193,19 +182,27 @@ TOOLS (try in order):

EXECUTION:
For each module above:
1. Try tool 1:
   Bash({
     command: `cd "{{module_path}}" && ccw tool exec update_module_claude '{"strategy":"single-layer","path":".","tool":"{{tool_1}}"}'`,
     run_in_background: false
   })
   → Success: Report "✅ {{module_path}} updated with {{tool_1}}", proceed to next module
   → Failure: Try tool 2
2. Try tool 2:
   Bash({
     command: `cd "{{module_path}}" && ccw tool exec update_module_claude '{"strategy":"single-layer","path":".","tool":"{{tool_2}}"}'`,
     run_in_background: false
   })
   → Success: Report "✅ {{module_path}} updated with {{tool_2}}", proceed to next module
   → Failure: Try tool 3
3. Try tool 3:
   Bash({
     command: `cd "{{module_path}}" && ccw tool exec update_module_claude '{"strategy":"single-layer","path":".","tool":"{{tool_3}}"}'`,
     run_in_background: false
   })
   → Success: Report "✅ {{module_path}} updated with {{tool_3}}", proceed to next module
   → Failure: Report "❌ FAILED: {{module_path}} failed all tools", proceed to next module

REPORTING:
Report final summary with:
@@ -213,30 +210,16 @@ Report final summary with:
- Successful: Y modules
- Failed: Z modules
- Tool usage: {{tool_1}}:X, {{tool_2}}:Y, {{tool_3}}:Z
- Detailed results for each module
```

### Example Execution

**Depth 3 (new module)**:
```javascript
Task(subagent_type="memory-bridge", batch=[./src/api/auth], mode="related")
```

**Benefits**:
- 4 modules → 1 agent (75% reduction)
- Parallel batches, sequential within batch
- Each module gets full fallback chain
- Context-aware updates based on git changes

## Phase 4: Safety Verification

```javascript
// Check only CLAUDE.md modified
Bash({command: 'git diff --cached --name-only | grep -v "CLAUDE.md" || echo "Only CLAUDE.md files modified"', run_in_background: false});

// Display statistics
Bash({command: "git diff --stat", run_in_background: false});
```

**Aggregate results**:

@@ -1,517 +0,0 @@
---
name: workflow-skill-memory
description: Process WFS-* archived sessions using universal-executor agents with Gemini analysis to generate workflow-progress SKILL package (sessions-timeline, lessons, conflicts)
argument-hint: "session <session-id> | all"
allowed-tools: Task(*), TodoWrite(*), Bash(*), Read(*), Write(*)
---

# Workflow SKILL Memory Generator

## Overview

Generate SKILL package from archived workflow sessions using agent-driven analysis. Supports single-session incremental updates or parallel processing of all sessions.

**Scope**: Only processes WFS-* workflow sessions. Other session types (e.g., doc sessions) are automatically ignored.

## Usage

```bash
/memory:workflow-skill-memory session WFS-<session-id>   # Process single WFS session
/memory:workflow-skill-memory all                        # Process all WFS sessions in parallel
```

## Execution Modes

### Mode 1: Single Session (`session <session-id>`)

**Purpose**: Incremental update - process one archived session and merge into existing SKILL package

**Workflow**:
1. **Validate session**: Check if session exists in `.workflow/.archives/{session-id}/`
2. **Invoke agent**: Call `universal-executor` to analyze session and update SKILL documents
3. **Agent tasks**:
   - Read session data from `.workflow/.archives/{session-id}/`
   - Extract lessons, conflicts, and outcomes
   - Use Gemini for intelligent aggregation (optional)
   - Update or create SKILL documents using templates
   - Regenerate SKILL.md index

**Command Example**:
```bash
/memory:workflow-skill-memory session WFS-user-auth
```

**Expected Output**:
```
Session WFS-user-auth processed
Updated:
- sessions-timeline.md (1 session added)
- lessons-learned.md (3 lessons merged)
- conflict-patterns.md (1 conflict added)
- SKILL.md (index regenerated)
```

---

### Mode 2: All Sessions (`all`)

**Purpose**: Full regeneration - process all archived sessions in parallel for complete SKILL package

**Workflow**:
1. **List sessions**: Read manifest.json to get all archived session IDs
2. **Parallel invocation**: Launch multiple `universal-executor` agents in parallel (one per session)
3. **Agent coordination**:
   - Each agent processes one session independently
   - Agents use Gemini for analysis
   - Agents collect data into JSON (no direct file writes)
   - Final aggregator agent merges results and generates SKILL documents

**Command Example**:
```bash
/memory:workflow-skill-memory all
```

**Expected Output**:
```
All sessions processed in parallel
Sessions: 8 total
Updated:
- sessions-timeline.md (8 sessions)
- lessons-learned.md (24 lessons aggregated)
- conflict-patterns.md (12 conflicts documented)
- SKILL.md (index regenerated)
```

---

## Implementation Flow

### Phase 1: Validation and Setup

**Step 1.1: Parse Command Arguments**

Extract mode and session ID:
```javascript
if (args === "all") {
  mode = "all"
} else if (args.startsWith("session ")) {
  mode = "session"
  session_id = args.replace("session ", "").trim()
} else {
  ERROR = "Invalid arguments. Usage: session <session-id> | all"
  EXIT
}
```

**Step 1.2: Validate Archive Directory**
```bash
bash(test -d .workflow/.archives && echo "exists" || echo "missing")
```

If missing, report error and exit.

**Step 1.3: Mode-Specific Validation**

**Single Session Mode**:
```bash
# Validate session ID format (must start with WFS-)
if [[ ! "$session_id" =~ ^WFS- ]]; then
  ERROR = "Invalid session ID format. Only WFS-* sessions are supported"
  EXIT
fi

# Check if session exists
bash(test -d .workflow/.archives/{session_id} && echo "exists" || echo "missing")
```

If missing, report error: "Session {session_id} not found in archives"

**All Sessions Mode**:
```bash
# Read manifest and filter only WFS- sessions
bash(cat .workflow/.archives/manifest.json | jq -r '.archives[].session_id | select(startswith("WFS-"))')
```

Store the filtered session IDs in an array. Ignore doc sessions and other non-WFS sessions.

**Step 1.4: TodoWrite Initialization**

**Single Session Mode**:
```javascript
TodoWrite({todos: [
  {"content": "Validate session existence", "status": "completed", "activeForm": "Validating session"},
  {"content": "Invoke agent to process session", "status": "in_progress", "activeForm": "Invoking agent"},
  {"content": "Verify SKILL package updated", "status": "pending", "activeForm": "Verifying update"}
]})
```

**All Sessions Mode**:
```javascript
TodoWrite({todos: [
  {"content": "Read manifest and list sessions", "status": "completed", "activeForm": "Reading manifest"},
  {"content": "Invoke agents in parallel", "status": "in_progress", "activeForm": "Invoking agents"},
  {"content": "Verify SKILL package regenerated", "status": "pending", "activeForm": "Verifying regeneration"}
]})
```

---

### Phase 2: Agent Invocation

#### Single Session Mode - Agent Task

Invoke `universal-executor` with a session-specific task:

**Agent Prompt Structure**:
```
Task: Process Workflow Session for SKILL Package

Context:
- Session ID: {session_id}
- Session Path: .workflow/.archives/{session_id}/
- Mode: Incremental update

Objectives:

1. Read session data:
   - workflow-session.json (metadata)
   - IMPL_PLAN.md (implementation summary)
   - TODO_LIST.md (if exists)
   - manifest.json entry for lessons

2. Extract key information:
   - Description, tags, metrics
   - Lessons (successes, challenges, watch_patterns)
   - Context package path (reference only)
   - Key outcomes from IMPL_PLAN

3. Use Gemini for aggregation (optional):
   Command pattern:
   cd .workflow/.archives/{session_id} && gemini -p "
   PURPOSE: Extract lessons and conflicts from workflow session
   TASK:
   • Analyze IMPL_PLAN and lessons from manifest
   • Identify success patterns and challenges
   • Extract conflict patterns with resolutions
   • Categorize by functional domain
   MODE: analysis
   CONTEXT: @IMPL_PLAN.md @workflow-session.json
   EXPECTED: Structured lessons and conflicts in JSON format
   RULES: Template reference from skill-aggregation.txt
   "

3.5. **Generate SKILL.md Description** (CRITICAL for auto-loading):

   Read skill-index.txt template Section: "Description Field Generation"

   Execute command to get project root:
   git rev-parse --show-toplevel  # Example output: /d/Claude_dms3

   Apply description format:
   Progressive workflow development history (located at {project_root}).
   Load this SKILL when continuing development, analyzing past implementations,
   or learning from workflow history, especially when no relevant context exists in memory.

   **Validation**:
   - [ ] Path uses forward slashes (not backslashes)
   - [ ] All three use cases present
   - [ ] Trigger optimization phrase included
   - [ ] Path is absolute (starts with / or drive letter)

4. Read templates for formatting guidance:
   - ~/.claude/workflows/cli-templates/prompts/workflow/skill-sessions-timeline.txt
   - ~/.claude/workflows/cli-templates/prompts/workflow/skill-lessons-learned.txt
   - ~/.claude/workflows/cli-templates/prompts/workflow/skill-conflict-patterns.txt
   - ~/.claude/workflows/cli-templates/prompts/workflow/skill-index.txt

   **CRITICAL**: From skill-index.txt, read these sections:
   - "Description Field Generation" - Rules for generating description
   - "Variable Substitution Guide" - All required variables
   - "Generation Instructions" - Step-by-step generation process
   - "Validation Checklist" - Final validation steps

5. Update SKILL documents:
   - sessions-timeline.md: Append new session, update domain grouping
   - lessons-learned.md: Merge lessons into categories, update frequencies
   - conflict-patterns.md: Add conflicts, update recurring pattern frequencies
   - SKILL.md: Regenerate index with updated counts

   **For SKILL.md generation**:
   - Follow "Generation Instructions" from skill-index.txt (Steps 1-7)
   - Use git command for project_root: `git rev-parse --show-toplevel`
   - Apply "Description Field Generation" rules
   - Validate using "Validation Checklist"
   - Increment version (patch level)

6. Return result JSON:
   {
     "status": "success",
     "session_id": "{session_id}",
     "updates": {
       "sessions_added": 1,
       "lessons_merged": count,
       "conflicts_added": count
     }
   }
```
|
||||
---
|
||||
|
||||
#### All Sessions Mode - Parallel Agent Tasks
|
||||
|
||||
**Step 2.1: Launch parallel session analyzers**
|
||||
|
||||
Invoke multiple agents in parallel (one message with multiple Task calls):
|
||||
|
||||
**Per-Session Agent Prompt**:
|
||||
```
|
||||
Task: Extract Session Data for SKILL Package
|
||||
|
||||
Context:
|
||||
- Session ID: {session_id}
|
||||
- Mode: Parallel analysis (no direct file writes)
|
||||
|
||||
Objectives:
|
||||
|
||||
1. Read session data (same as single mode)
|
||||
|
||||
2. Extract key information (same as single mode)
|
||||
|
||||
3. Use Gemini for analysis (same as single mode)
|
||||
|
||||
4. Return structured data JSON:
|
||||
{
|
||||
"status": "success",
|
||||
"session_id": "{session_id}",
|
||||
"data": {
|
||||
"metadata": {
|
||||
"description": "...",
|
||||
"archived_at": "...",
|
||||
"tags": [...],
|
||||
"metrics": {...}
|
||||
},
|
||||
"lessons": {
|
||||
"successes": [...],
|
||||
"challenges": [...],
|
||||
"watch_patterns": [...]
|
||||
},
|
||||
"conflicts": [
|
||||
{
|
||||
"type": "architecture|dependencies|testing|performance",
|
||||
"pattern": "...",
|
||||
"resolution": "...",
|
||||
"code_impact": [...]
|
||||
}
|
||||
],
|
||||
"impl_summary": "First 200 chars of IMPL_PLAN",
|
||||
"context_package_path": "..."
|
||||
}
|
||||
}
|
||||
```
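
A sketch of what Step 2.1 looks like in practice, assuming `wfsSessions` holds the WFS-* IDs filtered in Step 1.3 and `buildPerSessionPrompt` renders the prompt above (both hypothetical names):

```javascript
// Launch one universal-executor per session in a single message (parallel execution)
for (const sessionId of wfsSessions) {
  Task({
    subagent_type: "universal-executor",
    description: `Extract ${sessionId} data for SKILL package`,
    prompt: buildPerSessionPrompt(sessionId)
  });
}
```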

**Step 2.2: Aggregate results**

After all session agents complete, invoke aggregator agent:

**Aggregator Agent Prompt**:
```
Task: Aggregate Session Results and Generate SKILL Package

Context:
- Mode: Full regeneration
- Input: JSON results from {session_count} session agents

Objectives:

1. Aggregate all session data:
   - Collect metadata from all sessions
   - Merge lessons by category
   - Group conflicts by type
   - Sort sessions by date

2. Use Gemini for final aggregation:
   gemini -p "
   PURPOSE: Aggregate lessons and conflicts from all workflow sessions
   TASK:
   • Group successes by functional domain
   • Categorize challenges by severity (HIGH/MEDIUM/LOW)
   • Identify recurring conflict patterns
   • Calculate frequencies and prioritize
   MODE: analysis
   CONTEXT: [Provide aggregated JSON data]
   EXPECTED: Final aggregated structure for SKILL documents
   RULES: Template reference from skill-aggregation.txt
   "

3. Read templates for formatting (same 4 templates as single mode)

4. Generate all SKILL documents:
   - sessions-timeline.md (all sessions, sorted by date)
   - lessons-learned.md (aggregated lessons with frequencies)
   - conflict-patterns.md (recurring patterns with resolutions)
   - SKILL.md (index with progressive loading)

5. Write files to .claude/skills/workflow-progress/

6. Return result JSON:
   {
     "status": "success",
     "sessions_processed": count,
     "files_generated": ["SKILL.md", "sessions-timeline.md", ...],
     "summary": {
       "total_sessions": count,
       "functional_domains": [...],
       "date_range": "...",
       "lessons_count": count,
       "conflicts_count": count
     }
   }
```
---

### Phase 3: Verification

**Step 3.1: Check SKILL Package Files**
```bash
bash(ls -lh .claude/skills/workflow-progress/)
```

Verify all 4 files exist:
- SKILL.md
- sessions-timeline.md
- lessons-learned.md
- conflict-patterns.md

**Step 3.2: TodoWrite Completion**

Mark all tasks as completed.

**Step 3.3: Display Summary**

**Single Session Mode**:
```
Session {session_id} processed successfully

Updated:
- sessions-timeline.md
- lessons-learned.md
- conflict-patterns.md
- SKILL.md

SKILL Location: .claude/skills/workflow-progress/SKILL.md
```

**All Sessions Mode**:
```
All sessions processed in parallel

Sessions: {count} total
Functional Domains: {domain_list}
Date Range: {earliest} - {latest}

Generated:
- sessions-timeline.md ({count} sessions)
- lessons-learned.md ({lessons_count} lessons)
- conflict-patterns.md ({conflicts_count} conflicts)
- SKILL.md (4-level progressive loading)

SKILL Location: .claude/skills/workflow-progress/SKILL.md

Usage:
- Level 0: Quick refresh (~2K tokens)
- Level 1: Recent history (~8K tokens)
- Level 2: Complete analysis (~25K tokens)
- Level 3: Deep dive (~40K tokens)
```

---

## Agent Guidelines

### Agent Capabilities

**universal-executor agents can**:
- Read files from `.workflow/.archives/`
- Execute bash commands
- Call Gemini CLI for intelligent analysis
- Read template files for formatting guidance
- Write SKILL package files (single mode) or return JSON (parallel mode)
- Return structured results

### Gemini Usage Pattern

**When to use Gemini**:
- Aggregating lessons from multiple sources
- Identifying recurring patterns
- Classifying conflicts by type and severity
- Extracting structured data from IMPL_PLAN

**Fallback Strategy**: If Gemini fails or times out, use direct file parsing with structured extraction logic.

---

## Template System

### Template Files

All templates located in: `~/.claude/workflows/cli-templates/prompts/workflow/`

1. **skill-sessions-timeline.txt**: Format for sessions-timeline.md
2. **skill-lessons-learned.txt**: Format for lessons-learned.md
3. **skill-conflict-patterns.txt**: Format for conflict-patterns.md
4. **skill-index.txt**: Format for SKILL.md index
5. **skill-aggregation.txt**: Rules for Gemini aggregation (existing)

### Template Usage in Agent

**Agents read templates to understand**:
- File structure and markdown format
- Data sources (which files to read)
- Update strategy (incremental vs full)
- Formatting rules and conventions
- Aggregation logic (for Gemini)

**Templates are NOT shown in this command documentation** - agents read them directly as needed.

---

## Error Handling

### Validation Errors
- **No archives directory**: "Error: No workflow archives found at .workflow/.archives/"
- **Invalid session ID format**: "Error: Invalid session ID format. Only WFS-* sessions are supported"
- **Session not found**: "Error: Session {session_id} not found in archives"
- **No WFS sessions in manifest**: "Error: No WFS-* workflow sessions found in manifest.json"

### Agent Errors
- If an agent fails, report the error message from the agent result
- If Gemini times out, agents use fallback direct parsing
- If a template read fails, agents use inline format

### Recovery
- Single session mode: Can be retried without affecting other sessions
- All sessions mode: If one agent fails, others continue; retry failed sessions individually

## Integration

### Called by `/workflow:session:complete`

Automatically invoked after session archival:
```bash
SlashCommand(command="/memory:workflow-skill-memory session {session_id}")
```

### Manual Invocation

Users can manually process sessions:
```bash
/memory:workflow-skill-memory session WFS-custom-feature  # Single session
/memory:workflow-skill-memory all                         # Full regeneration
```
@@ -1,204 +0,0 @@
---
name: breakdown
description: Decompose complex task into subtasks with dependency mapping, creates child task JSONs with parent references and execution order
argument-hint: "task-id"
---

# Task Breakdown Command (/task:breakdown)

## Overview
Breaks down complex tasks into executable subtasks with context inheritance and agent assignment.

## Core Principles
**File Cohesion:** Related files must stay in the same task
**10-Task Limit:** Total tasks cannot exceed 10 (triggers re-scoping)

## Core Features

**CRITICAL**: Manual breakdown with safety controls to prevent file conflicts and task limit violations.

### Breakdown Process
1. **Session Check**: Verify active session contains parent task
2. **Task Validation**: Ensure parent is `pending` status
3. **10-Task Limit Check**: Verify breakdown won't exceed total limit
4. **Manual Decomposition**: User defines subtasks with validation
5. **File Conflict Detection**: Warn if same files appear in multiple subtasks
6. **Similar Function Warning**: Alert if subtasks have overlapping functionality
7. **Context Distribution**: Inherit parent requirements and scope
8. **Agent Assignment**: Auto-assign agents based on subtask type
9. **TODO_LIST Update**: Regenerate TODO_LIST.md with new structure

### Breakdown Rules
- Only `pending` tasks can be broken down
- **Manual breakdown only**: Automated breakdown disabled to prevent violations
- Parent becomes `container` status (not executable)
- Subtasks use format: IMPL-N.M (max 2 levels)
- Context flows from parent to subtasks
- All relationships tracked in JSON
- **10-task limit enforced**: Breakdown rejected if total would exceed 10 tasks
- **File cohesion preserved**: Same files cannot be split across subtasks

## Usage

### Basic Breakdown
```bash
/task:breakdown impl-1
```

Interactive process:
```
Task: Build authentication module
Current total tasks: 6/10

MANUAL BREAKDOWN REQUIRED
Define subtasks manually (remaining capacity: 4 tasks):

1. Enter subtask title: User authentication core
   Focus files: models/User.js, routes/auth.js, middleware/auth.js

2. Enter subtask title: OAuth integration
   Focus files: services/OAuthService.js, routes/auth.js

FILE CONFLICT DETECTED:
- routes/auth.js appears in multiple subtasks
- Recommendation: Merge related authentication routes

SIMILAR FUNCTIONALITY WARNING:
- "User authentication" and "OAuth integration" both handle auth
- Consider combining into single task

# Use AskUserQuestion for confirmation
AskUserQuestion({
  questions: [{
    question: "File conflicts and/or similar functionality detected. How do you want to proceed?",
    header: "Confirm",
    options: [
      { label: "Proceed with breakdown", description: "Accept the risks and create the subtasks as defined." },
      { label: "Restart breakdown", description: "Discard current subtasks and start over." },
      { label: "Cancel breakdown", description: "Abort the operation and leave the parent task as is." }
    ],
    multiSelect: false
  }]
})

User selected: "Proceed with breakdown"

Task IMPL-1 broken down:
IMPL-1: Build authentication module (container)
├── IMPL-1.1: User authentication core -> @code-developer
└── IMPL-1.2: OAuth integration -> @code-developer

Files updated: .task/IMPL-1.json + 2 subtask files + TODO_LIST.md
```

## Decomposition Logic

### Agent Assignment
- **Design/Planning** → `@planning-agent`
- **Implementation** → `@code-developer`
- **Testing** → `@code-developer` (type: "test-gen")
- **Test Validation** → `@test-fix-agent` (type: "test-fix")
- **Review** → `@universal-executor` (optional)

### Context Inheritance
- Subtasks inherit parent requirements
- Scope refined for specific subtask
- Implementation details distributed appropriately

## Safety Controls

### File Conflict Detection
**Validates file cohesion across subtasks** (see the sketch after this list):
- Scans `focus_paths` in all subtasks
- Warns if same file appears in multiple subtasks
- Suggests merging subtasks with overlapping files
- Blocks breakdown if critical conflicts detected
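
A minimal sketch of the overlap check, assuming each subtask carries a `focus_paths` array as described above:

```javascript
// Hypothetical conflict detector: map each file to the subtasks that claim it
const fileOwners = new Map();
for (const subtask of subtasks) {
  for (const file of subtask.focus_paths) {
    if (!fileOwners.has(file)) fileOwners.set(file, []);
    fileOwners.get(file).push(subtask.id);
  }
}
const conflicts = [...fileOwners].filter(([, owners]) => owners.length > 1);
// e.g., [["routes/auth.js", ["IMPL-1.1", "IMPL-1.2"]]]
```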

### Similar Functionality Detection
**Prevents functional overlap:**
- Analyzes subtask titles for similar keywords
- Warns about potential functional redundancy
- Suggests consolidation of related functionality
- Examples: "user auth" + "login system" → merge recommendation

### 10-Task Limit Enforcement
**Hard limit compliance** (see the sketch after this list):
- Counts current total tasks in session
- Calculates breakdown impact on total
- Rejects breakdown if it would exceed 10 tasks
- Suggests re-scoping if limit reached
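
A sketch of the limit check under the same assumptions (`currentTotal` is the session's task count; that the `container` parent stops counting as an executable task is an assumption here):

```javascript
// Hypothetical 10-task limit check: the parent becomes a container, so the
// proposed subtasks replace it in the executable-task count
const proposedTotal = currentTotal - 1 + proposedSubtasks.length;
if (proposedTotal > 10) {
  report(`Breakdown would exceed 10-task limit (current: ${currentTotal}, proposed: ${proposedSubtasks.length})`);
  report("Suggestion: Re-scope project into smaller iterations");
}
```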

### Manual Control Requirements
**User-driven breakdown only:**
- No automatic subtask generation
- User must define each subtask title and scope
- Real-time validation during input
- Confirmation required before execution

## Implementation Details

- Complete task JSON schema
- Implementation field structure
- Context inheritance rules
- Agent assignment logic

## Validation

### Pre-breakdown Checks
1. Active session exists
2. Task found in session
3. Task status is `pending`
4. Not already broken down
5. **10-task limit compliance**: Total tasks + new subtasks ≤ 10
6. **Manual mode enabled**: No automatic breakdown allowed

### Post-breakdown Actions
1. Update parent to `container` status
2. Create subtask JSON files
3. Update parent subtasks list
4. Update session stats
5. **Regenerate TODO_LIST.md** with new hierarchy
6. Validate file paths in focus_paths
7. Update session task count

## Examples

### Basic Breakdown
```bash
/task:breakdown impl-1

impl-1: Build authentication (container)
├── impl-1.1: Design schema -> @planning-agent
├── impl-1.2: Implement logic + tests -> @code-developer
└── impl-1.3: Execute & fix tests -> @test-fix-agent
```

## Error Handling

```bash
# Task not found
Task IMPL-5 not found

# Already broken down
Task IMPL-1 already has subtasks

# Wrong status
Cannot break down completed task IMPL-2

# 10-task limit exceeded
Breakdown would exceed 10-task limit (current: 8, proposed: 4)
Suggestion: Re-scope project into smaller iterations

# File conflicts detected
File conflict: routes/auth.js appears in IMPL-1.1 and IMPL-1.2
Recommendation: Merge subtasks or redistribute files

# Similar functionality warning
Similar functions detected: "user login" and "authentication"
Consider consolidating related functionality

# Manual breakdown required
Automatic breakdown disabled. Use manual breakdown process.
```

**System ensures**: Manual breakdown control with file cohesion enforcement, similar functionality detection, and 10-task limit compliance
@@ -1,152 +0,0 @@
---
name: create
description: Generate task JSON from natural language description with automatic file pattern detection, scope inference, and dependency analysis
argument-hint: "\"task title\""
---

# Task Create Command (/task:create)

## Overview
Creates new implementation tasks with automatic context awareness and ID generation.

## Core Principles
**Task System:** @~/.claude/workflows/task-core.md

## Core Features

### Automatic Behaviors
- **ID Generation**: Auto-generates IMPL-N format (max 2 levels)
- **Context Inheritance**: Inherits from active workflow session
- **JSON Creation**: Creates task JSON in active session
- **Status Setting**: Initial status = "pending"
- **Agent Assignment**: Suggests agent based on task type
- **Session Integration**: Updates workflow session stats

### Context Awareness
- Validates active workflow session exists
- Avoids duplicate task IDs
- Inherits session requirements and scope
- Suggests task relationships

## Usage

### Basic Creation
```bash
/task:create "Build authentication module"
```

Output:
```
Task created: IMPL-1
Title: Build authentication module
Type: feature
Agent: code-developer
Status: pending
```

### Task Types
- `feature` - New functionality (default)
- `bugfix` - Bug fixes
- `refactor` - Code improvements
- `test` - Test implementation
- `docs` - Documentation

## Task Creation Process

1. **Session Validation**: Check active workflow session
2. **ID Generation**: Auto-increment IMPL-N (see the sketch after this list)
3. **Context Inheritance**: Load workflow context
4. **Implementation Setup**: Initialize implementation field
5. **Agent Assignment**: Select appropriate agent
6. **File Creation**: Save JSON to .task/ directory
7. **Session Update**: Update workflow stats
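
A minimal sketch of Step 2 (ID generation), assuming task JSONs live in the active session's `.task/` directory as described under File Management below:

```javascript
// Hypothetical IMPL-N auto-increment from existing top-level task files
const fs = require('fs');
const ids = fs.readdirSync('.task')
  .map(name => /^IMPL-(\d+)\.json$/.exec(name)) // ignores subtasks like IMPL-1.1.json
  .filter(Boolean)
  .map(match => parseInt(match[1], 10));
const nextTaskId = `IMPL-${ids.length > 0 ? Math.max(...ids) + 1 : 1}`;
```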

**Task Schema**: See @~/.claude/workflows/task-core.md for complete JSON structure

## Implementation Field Setup

### Auto-Population Strategy
- **Detailed info**: Extract from task description and scope
- **Missing info**: Mark `pre_analysis` as a multi-step array for later pre-analysis
- **Basic structure**: Initialize with standard template

### Analysis Triggers
When implementation details are incomplete:
```bash
Task requires analysis for implementation details
Suggest running: gemini analysis for file locations and dependencies
```

## File Management

### JSON Task File
- **Location**: `.task/IMPL-[N].json` in active session
- **Content**: Complete task with implementation field
- **Updates**: Session stats only

### Simple Process
1. Validate session and inputs
2. Generate task JSON
3. Update session stats
4. Notify completion

## Context Inheritance

Tasks inherit from:
1. **Active Session** - Requirements and scope from workflow-session.json
2. **Planning Document** - Context from IMPL_PLAN.md
3. **Parent Task** - For subtasks (IMPL-N.M format)

## Agent Assignment

Based on task type and title keywords:
- **Build/Implement** → @code-developer
- **Design/Plan** → @planning-agent
- **Test Generation** → @code-developer (type: "test-gen")
- **Test Execution/Fix** → @test-fix-agent (type: "test-fix")
- **Review/Audit** → @universal-executor (optional, only when explicitly requested)

## Validation Rules

1. **Session Check** - Active workflow session required
2. **Duplicate Check** - Avoid similar task titles
3. **ID Uniqueness** - Auto-increment task IDs
4. **Schema Validation** - Ensure proper JSON structure

## Error Handling

```bash
# No workflow session
No active workflow found
Use: /workflow init "project name"

# Duplicate task
Similar task exists: IMPL-3
Continue anyway? (y/n)

# Max depth exceeded
Cannot create IMPL-1.2.1 (max 2 levels)
Use: IMPL-2 for new main task
```

## Examples

### Feature Task
```bash
/task:create "Implement user authentication"

Created IMPL-1: Implement user authentication
Type: feature
Agent: code-developer
Status: pending
```

### Bug Fix
```bash
/task:create "Fix login validation bug" --type=bugfix

Created IMPL-2: Fix login validation bug
Type: bugfix
Agent: code-developer
Status: pending
```
@@ -1,270 +0,0 @@
---
name: execute
description: Execute task JSON using appropriate agent (@doc-generator/@implementation-agent/@test-agent) with pre-analysis context loading and status tracking
argument-hint: "task-id"
---

## Command Overview: /task:execute

**Purpose**: Executes tasks using intelligent agent selection, context preparation, and progress tracking.

## Execution Modes

- **auto (Default)**
  - Fully autonomous execution with automatic agent selection.
  - Provides progress updates at each checkpoint.
  - Automatically completes the task when done.
- **guided**
  - Executes step-by-step, requiring user confirmation at each checkpoint.
  - Allows for dynamic adjustments and manual review during the process.
- **review**
  - Optional manual review using `@universal-executor`.
  - Used only when explicitly requested by the user.

## Agent Selection Logic

The system determines the appropriate agent for a task using the following logic.

```pseudo
FUNCTION select_agent(task, agent_override):
    // A manual override always takes precedence.
    // Corresponds to the --agent=<agent-type> flag.
    IF agent_override IS NOT NULL:
        RETURN agent_override

    // If no override, select based on keywords in the task title.
    ELSE:
        CASE task.title:
            WHEN CONTAINS "Build API", "Implement":
                RETURN "@code-developer"
            WHEN CONTAINS "Design schema", "Plan":
                RETURN "@planning-agent"
            WHEN CONTAINS "Write tests", "Generate tests":
                RETURN "@code-developer" // type: test-gen
            WHEN CONTAINS "Execute tests", "Fix tests", "Validate":
                RETURN "@test-fix-agent" // type: test-fix
            WHEN CONTAINS "Review code":
                RETURN "@universal-executor" // Optional manual review
            DEFAULT:
                RETURN "@code-developer" // Default agent
        END CASE
END FUNCTION
```

## Core Execution Protocol

`Pre-Execution` -> `Execution` -> `Post-Execution`

### Pre-Execution Protocol

`Validate Task & Dependencies` **->** `Prepare Execution Context` **->** `Coordinate with TodoWrite`

- **Validation**: Checks for the task's JSON file in `.task/` and resolves its dependencies.
- **Context Preparation**: Loads task and workflow context, preparing it for the selected agent.
- **Session Context Injection**: Provides workflow directory paths to agents for TODO_LIST.md and summary management.
- **TodoWrite Coordination**: Generates execution Todos and checkpoints, syncing with `TODO_LIST.md`.

### Post-Execution Protocol

`Update Task Status` **->** `Generate Summary` **->** `Save Artifacts` **->** `Sync All Progress` **->** `Validate File Integrity`

- Updates status in the task's JSON file and `TODO_LIST.md` (a sketch of this step follows the list).
- Creates a summary in `.summaries/`.
- Stores outputs and syncs progress across the entire workflow session.
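
A minimal sketch of the status-update step, assuming the task JSON carries an `execution` block like the context structure shown later in this document (all names are illustrative):

```javascript
// Hypothetical post-execution bookkeeping for a completed task
const fs = require('fs');
const path = `.task/${taskId}.json`;
const task = JSON.parse(fs.readFileSync(path, 'utf8'));
task.status = 'completed';
task.execution.attempts += 1; // record this run
fs.writeFileSync(path, JSON.stringify(task, null, 2));
```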

### Task & Subtask Execution Logic

This logic defines how single, multiple, or parent tasks are handled.

```pseudo
FUNCTION execute_task_command(task_id, mode, parallel_flag):
    // Handle parent tasks by executing their subtasks.
    IF is_parent_task(task_id):
        subtasks = get_subtasks(task_id)
        EXECUTE_SUBTASK_BATCH(subtasks, mode)

    // Handle wildcard execution (e.g., IMPL-001.*)
    ELSE IF task_id CONTAINS "*":
        subtasks = find_matching_tasks(task_id)
        IF parallel_flag IS true:
            EXECUTE_IN_PARALLEL(subtasks)
        ELSE:
            FOR each subtask in subtasks:
                EXECUTE_SINGLE_TASK(subtask, mode)

    // Default case for a single task ID.
    ELSE:
        EXECUTE_SINGLE_TASK(task_id, mode)
END FUNCTION
```

### Error Handling & Recovery Logic

```pseudo
FUNCTION pre_execution_check(task):
    // Ensure dependencies are met before starting.
    IF task.dependencies ARE NOT MET:
        LOG_ERROR("Cannot execute " + task.id)
        LOG_INFO("Blocked by: " + unmet_dependencies)
        HALT_EXECUTION()

FUNCTION on_execution_failure(checkpoint):
    // Provide user with recovery options upon failure.
    LOG_WARNING("Execution failed at checkpoint " + checkpoint)
    PRESENT_OPTIONS([
        "Retry from checkpoint",
        "Retry from beginning",
        "Switch to guided mode",
        "Abort execution"
    ])
    AWAIT user_input
    // System performs the selected action.
END FUNCTION
```

### Simplified Context Structure (JSON)

This is the simplified data structure loaded to provide context for task execution.

```json
{
  "task": {
    "id": "IMPL-1",
    "title": "Build authentication module",
    "type": "feature",
    "status": "active",
    "agent": "code-developer",
    "context": {
      "requirements": ["JWT authentication", "OAuth2 support"],
      "scope": ["src/auth/*", "tests/auth/*"],
      "acceptance": ["Module handles JWT tokens", "OAuth2 flow implemented"],
      "inherited_from": "WFS-user-auth"
    },
    "relations": {
      "parent": null,
      "subtasks": ["IMPL-1.1", "IMPL-1.2"],
      "dependencies": ["IMPL-0"]
    },
    "implementation": {
      "files": [
        {
          "path": "src/auth/login.ts",
          "location": {
            "function": "authenticateUser",
            "lines": "25-65",
            "description": "Main authentication logic"
          },
          "original_code": "// Code snippet extracted via gemini analysis",
          "modifications": {
            "current_state": "Basic password authentication only",
            "proposed_changes": [
              "Add JWT token generation",
              "Implement OAuth2 callback handling",
              "Add multi-factor authentication support"
            ],
            "logic_flow": [
              "validateCredentials() ───► checkUserExists()",
              "◊─── if password ───► generateJWT() ───► return token",
              "◊─── if OAuth ───► validateOAuthCode() ───► exchangeForToken()",
              "◊─── if MFA ───► sendMFACode() ───► awaitVerification()"
            ],
            "reason": "Support modern authentication standards and security requirements",
            "expected_outcome": "Comprehensive authentication system supporting multiple methods"
          }
        }
      ],
      "context_notes": {
        "dependencies": ["jsonwebtoken", "passport", "speakeasy"],
        "affected_modules": ["user-session", "auth-middleware", "api-routes"],
        "risks": [
          "Breaking changes to existing login endpoints",
          "Token storage and rotation complexity",
          "OAuth provider configuration dependencies"
        ],
        "performance_considerations": "JWT validation adds ~10ms per request, OAuth callbacks may timeout",
        "error_handling": "Ensure sensitive authentication errors don't leak user enumeration data"
      },
      "pre_analysis": [
        {
          "action": "analyze patterns",
          "template": "~/.claude/workflows/cli-templates/prompts/analysis/02-analyze-code-patterns.txt",
          "method": "gemini"
        }
      ]
    }
  },
  "workflow": {
    "session": "WFS-user-auth",
    "phase": "IMPLEMENT",
    "session_context": {
      "workflow_directory": ".workflow/WFS-user-auth/",
      "todo_list_location": ".workflow/WFS-user-auth/TODO_LIST.md",
      "summaries_directory": ".workflow/WFS-user-auth/.summaries/",
      "task_json_location": ".workflow/WFS-user-auth/.task/"
    }
  },
  "execution": {
    "agent": "code-developer",
    "mode": "auto",
    "attempts": 0
  }
}
```

### Agent-Specific Context

Different agents receive context tailored to their function, including implementation details (a filtering sketch follows these lists):

**`@code-developer`**:
- Complete implementation.files array with file paths and locations
- original_code snippets and proposed_changes for precise modifications
- logic_flow diagrams for understanding data flow
- Dependencies and affected modules for integration planning
- Performance and error-handling considerations

**`@planning-agent`**:
- High-level requirements, constraints, and success criteria
- Implementation risks and mitigation strategies
- Architecture implications from implementation.context_notes

**`@test-fix-agent`**:
- Test files to execute from task.context.focus_paths
- Source files to fix from implementation.files[].path
- Expected behaviors from implementation.modifications.logic_flow
- Error conditions to validate from implementation.context_notes.error_handling
- Performance requirements from implementation.context_notes.performance_considerations

**`@universal-executor`**:
- Used for optional manual reviews when explicitly requested
- Code quality standards and implementation patterns
- Security considerations from implementation.context_notes.risks
- Dependency validation from implementation.context_notes.dependencies
- Architecture compliance checks
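
A minimal sketch of how this per-agent tailoring could work, assuming the context JSON shown above has already been parsed; the `contextForAgent` helper and its field selections are illustrative, not the actual dispatcher:

```typescript
// Illustrative sketch: project the full task context into an agent-specific view.
// The shape below is hypothetical and only mirrors the lists in this section.
type TaskContext = {
  task: {
    context: Record<string, unknown>;
    implementation: {
      files: unknown[];
      context_notes: Record<string, unknown>;
    };
  };
};

function contextForAgent(ctx: TaskContext, agent: string): Record<string, unknown> {
  const impl = ctx.task.implementation;
  switch (agent) {
    case "code-developer":
      // Full implementation detail: files, locations, logic flows, and notes.
      return { files: impl.files, notes: impl.context_notes };
    case "planning-agent":
      // High-level view: requirements plus risk and architecture notes only.
      return { requirements: ctx.task.context, risks: impl.context_notes["risks"] };
    case "test-fix-agent":
      // Files to fix plus expected behaviors and error/performance constraints.
      return {
        files: impl.files,
        errorHandling: impl.context_notes["error_handling"],
        performance: impl.context_notes["performance_considerations"],
      };
    default:
      // universal-executor and other agents receive the complete task context.
      return { ...ctx.task };
  }
}
```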

### Simplified File Output

- **Task JSON File (`.task/<task-id>.json`)**: Updated with status and last attempt time only.
- **Session File (`workflow-session.json`)**: Updated task stats (completed count).
- **Summary File**: Generated in `.summaries/` upon completion (optional).

### Simplified Summary Template

Optional summary file generated at `.summaries/IMPL-[task-id]-summary.md`.

```markdown
# Task Summary: IMPL-1 Build Authentication Module

## What Was Done
- Created src/auth/login.ts with JWT validation
- Added tests in tests/auth.test.ts

## Execution Results
- **Agent**: code-developer
- **Status**: completed

## Files Modified
- `src/auth/login.ts` (created)
- `tests/auth.test.ts` (created)
```
---
name: replan
description: Update task JSON with new requirements, or batch-update multiple tasks from a verification report; tracks changes in task-changes.json
argument-hint: "task-id [\"text\"|file.md] | --batch [verification-report.md]"
allowed-tools: Read(*), Write(*), Edit(*), TodoWrite(*), Glob(*), Bash(*)
---

# Task Replan Command (/task:replan)

## Overview
Replans individual tasks, or batch-processes multiple tasks, with change tracking and backup management.

**Modes**:
- **Single Task Mode**: Replan one task with specific changes
- **Batch Mode**: Process multiple tasks from an action-plan verification report

## Core Principles
**Task System:** @~/.claude/workflows/task-core.md

## Key Features
- **Single/Batch Operations**: Single task or multiple tasks from verification report
- **Multiple Input Sources**: Text, files, or verification report
- **Backup Management**: Automatic backup of previous versions
- **Change Documentation**: Track all modifications
- **Progress Tracking**: TodoWrite integration for batch operations

**CRITICAL**: Validates the active session before replanning.

## Operation Modes

### Single Task Mode

#### Direct Text (Default)
```bash
/task:replan IMPL-1 "Add OAuth2 authentication support"
```

#### File-based Input
```bash
/task:replan IMPL-1 updated-specs.md
```
Supports: .md, .txt, .json, .yaml

#### Interactive Mode
```bash
/task:replan IMPL-1 --interactive
```
Guided step-by-step modification process with validation.

### Batch Mode

#### From Verification Report
```bash
/task:replan --batch ACTION_PLAN_VERIFICATION.md
```

**Workflow**:
1. Parse verification report to extract replan recommendations
2. Create TodoWrite task list for all modifications
3. Process each task sequentially with confirmation
4. Track progress and generate summary report

**Auto-detection**: If the input file contains an "Action Plan Verification Report" header, batch mode is entered automatically.

## Replanning Process

### Single Task Process

1. **Load & Validate**: Read task JSON and validate session
2. **Parse Input**: Process changes from input source
3. **Create Backup**: Save previous version to backup folder
4. **Update Task**: Modify JSON structure and relationships
5. **Save Changes**: Write updated task and increment version
6. **Update Session**: Reflect changes in workflow stats (see the sketch after this list)
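
The steps above amount to a small load → backup → update → save pipeline. A sketch under the file layout described in this document; `replanTask`, the version-bump rule, and the direct `fs` calls are assumptions standing in for the Read/Write tools:

```typescript
import * as fs from "fs";

const readJson = (p: string) => JSON.parse(fs.readFileSync(p, "utf8"));
const writeJson = (p: string, v: unknown) =>
  fs.writeFileSync(p, JSON.stringify(v, null, 2));

function replanTask(taskId: string, changes: string, taskDir = ".task"): void {
  const taskPath = `${taskDir}/${taskId}.json`;
  const task = readJson(taskPath); // 1. Load (validation omitted in this sketch)

  // 3. Create a backup of the previous version before touching anything.
  const backupPath = `${taskDir}/backup/${taskId}-v${task.version}.json`;
  fs.mkdirSync(`${taskDir}/backup`, { recursive: true });
  fs.copyFileSync(taskPath, backupPath);

  // 4-5. Apply changes, bump the minor version, and record replan history.
  const [major, minor] = String(task.version).split(".").map(Number);
  task.version = `${major}.${minor + 1}`;
  task.replan_history = [
    ...(task.replan_history ?? []),
    {
      version: task.version,
      reason: changes,
      input_source: "direct_text",
      backup_location: backupPath,
      timestamp: new Date().toISOString(),
    },
  ];
  writeJson(taskPath, task); // 6. Session-stats update not shown here.
}
```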

### Batch Process

1. **Parse Verification Report**: Extract all replan recommendations
2. **Initialize TodoWrite**: Create task list for tracking
3. **For Each Task**:
   - Mark todo as in_progress
   - Load and validate task JSON
   - Create backup
   - Apply recommended changes
   - Save updated task
   - Mark todo as completed
4. **Generate Summary**: Report all changes and backup locations

## Backup Management

### Backup Tracking
Tasks maintain a backup history:
```json
{
  "id": "IMPL-1",
  "version": "1.2",
  "replan_history": [
    {
      "version": "1.2",
      "reason": "Add OAuth2 support",
      "input_source": "direct_text",
      "backup_location": ".task/backup/IMPL-1-v1.1.json",
      "timestamp": "2025-10-17T10:30:00Z"
    }
  ]
}
```

**Complete schema**: See @~/.claude/workflows/task-core.md

### File Structure
```
.task/
├── IMPL-1.json              # Current version
├── backup/
│   ├── IMPL-1-v1.0.json     # Original version
│   ├── IMPL-1-v1.1.json     # Previous backup
│   └── IMPL-1-v1.2.json     # Latest backup
└── [new subtasks as needed]
```

**Backup Naming**: `{task-id}-v{version}.json`

## Implementation Updates

### Change Detection
Tracks modifications to the following (a detection sketch follows this list):
- Files in implementation.files array
- Dependencies and affected modules
- Risk assessments and performance notes
- Logic flows and code locations
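
One way to implement this detection is a shallow diff of the tracked sections between the backed-up and updated task JSON; the section list below simply mirrors this document, and `changedSections` is hypothetical:

```typescript
// Hypothetical sketch: report which tracked sections differ between two task versions.
const TRACKED_SECTIONS = [
  "implementation.files",
  "implementation.context_notes.dependencies",
  "implementation.context_notes.affected_modules",
  "implementation.context_notes.risks",
  "implementation.context_notes.performance_considerations",
];

function getPath(obj: any, path: string): unknown {
  return path.split(".").reduce((o, key) => (o == null ? o : o[key]), obj);
}

function changedSections(previous: object, updated: object): string[] {
  // JSON string comparison is crude but sufficient for flagging re-analysis.
  return TRACKED_SECTIONS.filter(
    (p) => JSON.stringify(getPath(previous, p)) !== JSON.stringify(getPath(updated, p)),
  );
}
```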

### Analysis Triggers
May require gemini re-analysis when:
- New files need code extraction
- Function locations change
- Dependencies require re-evaluation

## Document Updates

### Planning Document
May update IMPL_PLAN.md sections when the task structure changes significantly.

### TODO List Sync
If TODO_LIST.md exists, synchronizes (see the sketch below):
- New subtasks (with [ ] checkbox)
- Modified tasks (marked as updated)
- Removed subtasks (deleted from list)
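
A rough sketch of that sync, assuming TODO_LIST.md uses `- [ ] TASK-ID: title` checkbox lines; the exact line format is an assumption:

```typescript
// Hypothetical sketch: append new subtasks and drop removed ones from TODO_LIST.md.
function syncTodoList(todoMd: string, subtasks: { id: string; title: string }[]): string {
  const lines = todoMd.split("\n");
  const known = new Set(subtasks.map((t) => t.id));

  // Remove lines for subtasks that no longer exist.
  const kept = lines.filter((line) => {
    const m = line.match(/^- \[[ x]\] (\S+):/);
    return !m || known.has(m[1]);
  });

  // Append checkboxes for subtasks not yet listed.
  const listed = new Set(
    kept.map((l) => l.match(/^- \[[ x]\] (\S+):/)?.[1]).filter(Boolean),
  );
  for (const t of subtasks) {
    if (!listed.has(t.id)) kept.push(`- [ ] ${t.id}: ${t.title}`);
  }
  return kept.join("\n");
}
```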

## Change Documentation

### Change Summary
Generates a brief change log with:
- Version increment (1.1 → 1.2)
- Input source and reason
- Key modifications made
- Files updated/created
- Backup location

## Session Updates

Updates workflow-session.json with:
- Modified task tracking
- Task count changes (if subtasks added/removed)
- Last modification timestamps

## Rollback Support

```bash
/task:replan IMPL-1 --rollback v1.1

Rollback to version 1.1:
- Restore task from backup/.../IMPL-1-v1.1.json
- Remove new subtasks if any
- Update session stats

# Use AskUserQuestion for confirmation
AskUserQuestion({
  questions: [{
    question: "Are you sure you want to roll back this task to a previous version?",
    header: "Confirm",
    options: [
      { label: "Yes, rollback", description: "Restore the task from the selected backup." },
      { label: "No, cancel", description: "Keep the current version of the task." }
    ],
    multiSelect: false
  }]
})

User selected: "Yes, rollback"

Task rolled back to version 1.1
```

## Batch Processing with TodoWrite

### Progress Tracking
When processing multiple tasks, automatically creates a TodoWrite task list:

```markdown
**Batch Replan Progress**:
- [x] IMPL-002: Add FR-12 draft saving acceptance criteria
- [x] IMPL-003: Add FR-14 history tracking acceptance criteria
- [ ] IMPL-004: Add FR-09 response surface explicit coverage
- [ ] IMPL-008: Add NFR performance validation steps
```

### Batch Report
After completion, generates a summary:
```markdown
## Batch Replan Summary

**Total Tasks**: 4
**Successful**: 3
**Failed**: 1
**Skipped**: 0

### Changes Made
- IMPL-002 v1.0 → v1.1: Added FR-12 acceptance criteria
- IMPL-003 v1.0 → v1.1: Added FR-14 acceptance criteria
- IMPL-004 v1.0 → v1.1: Added FR-09 explicit coverage

### Backups Created
- .task/backup/IMPL-002-v1.0.json
- .task/backup/IMPL-003-v1.0.json
- .task/backup/IMPL-004-v1.0.json

### Errors
- IMPL-008: File not found (task may have been renamed)
```

## Examples

### Single Task - Text Input
```bash
/task:replan IMPL-1 "Add OAuth2 authentication support"

Processing changes...
Proposed updates:
+ Add OAuth2 integration
+ Update authentication flow

# Use AskUserQuestion for confirmation
AskUserQuestion({
  questions: [{
    question: "Do you want to apply these changes to the task?",
    header: "Apply",
    options: [
      { label: "Yes, apply", description: "Create new version with these changes." },
      { label: "No, cancel", description: "Discard changes and keep current version." }
    ],
    multiSelect: false
  }]
})

User selected: "Yes, apply"

Version 1.2 created
Context updated
Backup saved to .task/backup/IMPL-1-v1.1.json
```

### Single Task - File Input
```bash
/task:replan IMPL-2 requirements.md

Loading requirements.md...
Applying specification changes...

Task updated with new requirements
Version 1.1 created
Backup saved to .task/backup/IMPL-2-v1.0.json
```

### Batch Mode - From Verification Report
```bash
/task:replan --batch .workflow/WFS-{session}/.process/ACTION_PLAN_VERIFICATION.md

Parsing verification report...
Found 4 tasks requiring replanning:
- IMPL-002: Add FR-12 draft saving acceptance criteria
- IMPL-003: Add FR-14 history tracking acceptance criteria
- IMPL-004: Add FR-09 response surface explicit coverage
- IMPL-008: Add NFR performance validation steps

Creating task tracking list...

Processing IMPL-002...
Backup created: .task/backup/IMPL-002-v1.0.json
Updated to v1.1

Processing IMPL-003...
Backup created: .task/backup/IMPL-003-v1.0.json
Updated to v1.1

Processing IMPL-004...
Backup created: .task/backup/IMPL-004-v1.0.json
Updated to v1.1

Processing IMPL-008...
Backup created: .task/backup/IMPL-008-v1.0.json
Updated to v1.1

Batch replan completed: 4/4 successful
Summary report saved
```

### Batch Mode - Auto-detection
```bash
# If the file contains "Action Plan Verification Report", batch mode is entered automatically
/task:replan ACTION_PLAN_VERIFICATION.md

Detected verification report format
Entering batch mode...
[same as above]
```

## Error Handling

### Single Task Errors
```bash
# Task not found
Task IMPL-5 not found
Check task ID with /workflow:status

# Task completed
Task IMPL-1 is completed (cannot replan)
Create new task for additional work

# File not found
File requirements.md not found
Check file path

# No input provided
Please specify changes needed
Provide text, file, or verification report
```

### Batch Mode Errors
```bash
# Invalid verification report
File does not contain valid verification report format
Check report structure or use single task mode

# Partial failures
Batch completed with errors: 3/4 successful
Review error details in summary report

# No replan recommendations found
Verification report contains no replan recommendations
Check report content or use /workflow:action-plan-verify first
```

## Batch Mode Integration

### Input Format Expectations
Batch mode parses verification reports looking for:

1. **Required Actions Section**: Commands like `/task:replan IMPL-X "changes"`
2. **Findings Table**: Task IDs with recommendations
3. **Next Actions Section**: Specific replan commands

**Example Patterns**:
```markdown
#### 1. HIGH Priority - Address FR Coverage Gaps
/task:replan IMPL-004 "
Add explicit acceptance criteria:
- FR-09: Response surface 3D visualization
"

#### 2. MEDIUM Priority - Enhance NFR Coverage
/task:replan IMPL-008 "
Add performance testing:
- NFR-01: Load test API endpoints
"
```

### Extraction Logic
1. Scan for `/task:replan` commands in the report
2. Extract task ID and change description
3. Group by priority (HIGH, MEDIUM, LOW)
4. Process in priority order with TodoWrite tracking (see the sketch below)
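
A sketch of this extraction, assuming the report embeds commands in the quoted multi-line form shown above; the regex and the priority heuristic are approximations:

```typescript
// Hypothetical sketch: pull /task:replan commands and their priorities from a report.
type Priority = "HIGH" | "MEDIUM" | "LOW";
type ReplanItem = { taskId: string; changes: string; priority: Priority };

function extractReplans(report: string): ReplanItem[] {
  const items: ReplanItem[] = [];
  // Matches the quoted form shown above: /task:replan IMPL-004 "...changes..."
  for (const m of report.matchAll(/\/task:replan\s+(\S+)\s+"([^"]*)"/g)) {
    const before = report.slice(0, m.index ?? 0);
    // Priority = the nearest "X Priority" heading appearing before the command.
    const priorities: Priority[] = ["HIGH", "MEDIUM", "LOW"];
    const priority = priorities
      .map((p) => ({ p, at: before.lastIndexOf(`${p} Priority`) }))
      .sort((a, b) => b.at - a.at)[0].p;
    items.push({ taskId: m[1], changes: m[2].trim(), priority });
  }
  const rank: Record<Priority, number> = { HIGH: 0, MEDIUM: 1, LOW: 2 };
  return items.sort((a, b) => rank[a.priority] - rank[b.priority]);
}
```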

### Confirmation Behavior
- **Default**: Confirm each task before applying
- **With `--auto-confirm`**: Apply all changes without prompting
```bash
/task:replan --batch report.md --auto-confirm
```

## Implementation Details

### Backup Management
```typescript
// Backup file naming convention
const backupPath = `.task/backup/${taskId}-v${previousVersion}.json`;

// Backup metadata recorded in the task JSON
const updatedTaskFields = {
  replan_history: [
    {
      version: "1.2",
      timestamp: "2025-10-17T10:30:00Z",
      reason: "Add FR-09 explicit coverage",
      input_source: "batch_verification_report",
      backup_location: ".task/backup/IMPL-004-v1.1.json"
    }
  ]
};
```

### TodoWrite Integration
```typescript
// Initialize tracking for batch mode
TodoWrite({
  todos: taskList.map(task => ({
    content: `${task.id}: ${task.changeDescription}`,
    status: "pending",
    activeForm: `Replanning ${task.id}`
  }))
});

// Update progress during processing
// (updateTaskStatus is a helper that returns the full todo list
// with the given task's status changed)
TodoWrite({
  todos: updateTaskStatus(taskId, "in_progress")
});

// Mark completed
TodoWrite({
  todos: updateTaskStatus(taskId, "completed")
});
```
---
name: version
description: Display Claude Code version information and check for updates
allowed-tools: Bash(*)
---

# Version Command (/version)

## Purpose
Display local and global installation versions, check for the latest updates from GitHub, and provide upgrade recommendations.

## Execution Flow
1. **Local Version Check**: Read version information from `./.claude/version.json` if it exists.
2. **Global Version Check**: Read version information from `~/.claude/version.json` if it exists.
3. **Fetch Remote Versions**: Use the GitHub API to get the latest stable release tag and the latest commit hash from the main branch.
4. **Compare & Suggest**: Compare installed versions with the latest remote versions and provide upgrade suggestions if applicable.

## Step 1: Check Local Version

### Check if local version.json exists
```bash
bash(test -f ./.claude/version.json && echo "found" || echo "not_found")
```

### Read local version (if exists)
```bash
bash(cat ./.claude/version.json)
```

### Extract version (grep fallback, no jq required)
```bash
bash(cat ./.claude/version.json | grep -o '"version": *"[^"]*"' | cut -d'"' -f4)
```

### Extract installation date
```bash
bash(cat ./.claude/version.json | grep -o '"installation_date_utc": *"[^"]*"' | cut -d'"' -f4)
```

**Output Format**:
```
Local Version: 3.2.1
Installed: 2025-10-03T12:00:00Z
```

## Step 2: Check Global Version

### Check if global version.json exists
```bash
bash(test -f ~/.claude/version.json && echo "found" || echo "not_found")
```

### Read global version
```bash
bash(cat ~/.claude/version.json)
```

### Extract version
```bash
bash(cat ~/.claude/version.json | grep -o '"version": *"[^"]*"' | cut -d'"' -f4)
```

### Extract installation date
```bash
bash(cat ~/.claude/version.json | grep -o '"installation_date_utc": *"[^"]*"' | cut -d'"' -f4)
```

**Output Format**:
```
Global Version: 3.2.1
Installed: 2025-10-03T12:00:00Z
```

## Step 3: Fetch Latest Stable Release

### Call GitHub API for latest release (with timeout)
```bash
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/releases/latest" 2>/dev/null, timeout: 30000)
```

### Extract tag name (version)
```bash
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/releases/latest" 2>/dev/null | grep -o '"tag_name": *"[^"]*"' | head -1 | cut -d'"' -f4, timeout: 30000)
```

### Extract release name
```bash
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/releases/latest" 2>/dev/null | grep -o '"name": *"[^"]*"' | head -1 | cut -d'"' -f4, timeout: 30000)
```

### Extract published date
```bash
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/releases/latest" 2>/dev/null | grep -o '"published_at": *"[^"]*"' | cut -d'"' -f4, timeout: 30000)
```

**Output Format**:
```
Latest Stable: v3.2.2
Release: v3.2.2: Independent Test-Gen Workflow with Cross-Session Context
Published: 2025-10-03T04:10:08Z
```

## Step 4: Fetch Latest Main Branch

### Call GitHub API for latest commit on main (with timeout)
```bash
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/commits/main" 2>/dev/null, timeout: 30000)
```

### Extract commit SHA (short)
```bash
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/commits/main" 2>/dev/null | grep -o '"sha": *"[^"]*"' | head -1 | cut -d'"' -f4 | cut -c1-7, timeout: 30000)
```

### Extract commit message (first line only)
```bash
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/commits/main" 2>/dev/null | grep '"message":' | cut -d'"' -f4 | cut -d'\' -f1, timeout: 30000)
```

### Extract commit date
```bash
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/commits/main" 2>/dev/null | grep -o '"date": *"[^"]*"' | head -1 | cut -d'"' -f4, timeout: 30000)
```

**Output Format**:
```
Latest Dev: a03415b
Message: feat: Add version tracking and upgrade check system
Date: 2025-10-03T04:46:44Z
```

## Step 5: Compare Versions and Suggest Upgrade

### Normalize versions (remove 'v' prefix)
```bash
bash(echo "v3.2.1" | sed 's/^v//')
```

### Compare two versions
```bash
bash(printf "%s\n%s" "3.2.1" "3.2.2" | sort -V | tail -n 1)
```

### Check if versions are equal
```bash
# Given: local="3.2.1" remote="3.2.2" (both with the 'v' prefix removed)
if [ "$local" = "$remote" ]; then
  echo "Up to date"
elif [ "$(printf "%s\n%s" "$local" "$remote" | sort -V | tail -n 1)" = "$remote" ]; then
  echo "Upgrade available"
else
  echo "Development version (local is newer)"
fi
```

**Output Scenarios**:

**Scenario 1: Up to date**
```
You are on the latest stable version (3.2.1)
```

**Scenario 2: Upgrade available**
```
A newer stable version is available: v3.2.2
Your version: 3.2.1

To upgrade:
PowerShell: iex (iwr -useb https://raw.githubusercontent.com/catlog22/Claude-Code-Workflow/main/install-remote.ps1)
Bash: bash <(curl -fsSL https://raw.githubusercontent.com/catlog22/Claude-Code-Workflow/main/install-remote.sh)
```

**Scenario 3: Development version**
```
You are running a development version (3.4.0-dev)
This is newer than the latest stable release (v3.3.0)
```

## Simple Bash Commands

### Basic Operations
```bash
# Check local version file
bash(test -f ./.claude/version.json && cat ./.claude/version.json)

# Check global version file
bash(test -f ~/.claude/version.json && cat ~/.claude/version.json)

# Extract version from JSON
bash(cat version.json | grep -o '"version": *"[^"]*"' | cut -d'"' -f4)

# Extract date from JSON
bash(cat version.json | grep -o '"installation_date_utc": *"[^"]*"' | cut -d'"' -f4)

# Fetch latest release (with timeout)
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/releases/latest" 2>/dev/null, timeout: 30000)

# Extract tag name
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/releases/latest" 2>/dev/null | grep -o '"tag_name": *"[^"]*"' | cut -d'"' -f4, timeout: 30000)

# Extract release name
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/releases/latest" 2>/dev/null | grep -o '"name": *"[^"]*"' | head -1 | cut -d'"' -f4, timeout: 30000)

# Fetch latest commit (with timeout)
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/commits/main" 2>/dev/null, timeout: 30000)

# Extract commit SHA
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/commits/main" 2>/dev/null | grep -o '"sha": *"[^"]*"' | head -1 | cut -d'"' -f4 | cut -c1-7, timeout: 30000)

# Extract commit message (first line)
bash(curl -fsSL "https://api.github.com/repos/catlog22/Claude-Code-Workflow/commits/main" 2>/dev/null | grep '"message":' | cut -d'"' -f4 | cut -d'\' -f1, timeout: 30000)

# Compare versions
bash(printf "%s\n%s" "3.2.1" "3.2.2" | sort -V | tail -n 1)

# Remove 'v' prefix
bash(echo "v3.2.1" | sed 's/^v//')
```

## Error Handling

### No installation found
```
WARNING: Claude Code Workflow not installed
Install using:
PowerShell: iex (iwr -useb https://raw.githubusercontent.com/catlog22/Claude-Code-Workflow/main/install-remote.ps1)
```

### Network error
```
ERROR: Could not fetch latest version from GitHub
Check your network connection
```

### Invalid version.json
```
ERROR: version.json is invalid or corrupted
```

## Design Notes

- Uses simple, direct bash commands instead of complex functions
- Each step is independent and can be executed separately
- Falls back to grep/sed for JSON parsing (no jq dependency required)
- Network calls use curl with error suppression and a 30-second timeout
- Version comparison uses `sort -V` for accurate semantic versioning
- Uses the `/commits/main` API instead of `/branches/main` for more reliable commit info
- Extracts the first line of the commit message using `cut -d'\' -f1` to handle JSON escape sequences

## API Endpoints

### GitHub API Used
- **Latest Release**: `https://api.github.com/repos/catlog22/Claude-Code-Workflow/releases/latest`
  - Fields: `tag_name`, `name`, `published_at`
- **Latest Commit**: `https://api.github.com/repos/catlog22/Claude-Code-Workflow/commits/main`
  - Fields: `sha`, `commit.message`, `commit.author.date`

### Timeout Configuration
All network calls should use `timeout: 30000` (30 seconds) to handle slow connections.
---
name: ccw view
description: Dashboard - Open CCW workflow dashboard for managing tasks and sessions
category: general
---

# CCW View Command

Open the CCW workflow dashboard for visualizing and managing project tasks, sessions, and workflow execution status.

## Description

`ccw view` launches an interactive web dashboard that provides:
- **Workflow Overview**: Visualize current workflow status and command chain execution
- **Session Management**: View and manage active workflow sessions
- **Task Tracking**: Monitor TODO items and task progress
- **Workspace Switching**: Switch between different project workspaces
- **Real-time Updates**: Live updates of command execution and status

## Usage

```bash
# Open dashboard for current workspace
ccw view

# Specify workspace path
ccw view --path /path/to/workspace

# Custom port (default: 3456)
ccw view --port 3000

# Bind to specific host
ccw view --host 0.0.0.0 --port 3456

# Open without launching the browser (prints the URL instead)
ccw view --no-browser
```

## Options

| Option | Default | Description |
|--------|---------|-------------|
| `--path <path>` | Current directory | Workspace path to display |
| `--port <port>` | 3456 | Server port for dashboard |
| `--host <host>` | 127.0.0.1 | Server host/bind address |
| `--no-browser` | false | Don't launch browser automatically |
| `-h, --help` | - | Show help message |

## Features

### Dashboard Sections

#### 1. **Workflow Overview**
- Current workflow status
- Command chain visualization (with Minimum Execution Units marked)
- Live progress tracking
- Error alerts

#### 2. **Session Management**
- List active sessions by type (workflow, review, tdd)
- Session details (created time, last activity, session ID)
- Quick actions (resume, pause, complete)
- Session logs/history

#### 3. **Task Tracking**
- TODO list with status indicators
- Progress percentage
- Task grouping by workflow stage
- Quick inline task updates

#### 4. **Workspace Switcher**
- Browse available workspaces
- Switch context with one click
- Recent workspaces list

#### 5. **Command History**
- Recent commands executed
- Execution time and status
- Quick re-run options

### Keyboard Shortcuts

| Shortcut | Action |
|----------|--------|
| `R` | Refresh dashboard |
| `Cmd/Ctrl + J` | Jump to session search |
| `Cmd/Ctrl + K` | Open command palette |
| `?` | Show help |

## Multi-Instance Support

The dashboard supports multiple concurrent instances:

```bash
# Terminal 1: Workspace A on port 3456
ccw view --path ~/projects/workspace-a

# Terminal 2: Workspace B on port 3457
ccw view --path ~/projects/workspace-b --port 3457

# Switching workspaces on the same port
ccw view --path ~/projects/workspace-c  # Auto-switches existing server
```

When the server is already running and you execute `ccw view` with a different path, it (sketched below):
1. Detects the running server on the port
2. Sends a workspace switch request
3. Updates the dashboard to the new workspace
4. Opens the browser with the updated context
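
A sketch of that flow; the `/api/health` endpoint is documented later in this file, but `/api/workspace` is a guess at the internal switch API, not a documented endpoint:

```typescript
// Hypothetical sketch: reuse a running dashboard server, or report that a new one is needed.
async function openDashboard(path: string, port = 3456): Promise<void> {
  const base = `http://127.0.0.1:${port}`;
  try {
    // 1. Detect a running server via the documented health endpoint.
    const health = await fetch(`${base}/api/health`);
    if (health.ok) {
      // 2-3. Ask the existing instance to switch workspaces (endpoint name assumed).
      await fetch(`${base}/api/workspace`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ path }),
      });
      console.log(`Switched existing server on :${port} to ${path}`);
      return;
    }
  } catch {
    // Connection refused: nothing is listening on this port.
  }
  console.log(`No server on :${port}; a new instance would be started here.`);
}
```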

## Server Lifecycle

### Startup

```
ccw view
├─ Check if server running on port
│  ├─ If yes: Send switch-path request
│  └─ If no: Start new server
├─ Launch browser (unless --no-browser)
└─ Display dashboard URL
```

### Running

The dashboard server continues running until:
- The user explicitly stops it (Ctrl+C)
- All connections close after a timeout
- System shutdown

### Multiple Workspaces

Switching to a different workspace keeps the same server instance:
```
Server State Before: workspace-a on port 3456
ccw view --path ~/projects/workspace-b
Server State After:  workspace-b on port 3456 (same instance)
```

## Environment Variables

```bash
# Set default port
export CCW_VIEW_PORT=4000
ccw view  # Uses port 4000

# Set default host
export CCW_VIEW_HOST=localhost
ccw view --port 3456  # Binds to localhost:3456

# Disable browser launch by default
export CCW_VIEW_NO_BROWSER=true
ccw view  # Won't auto-launch browser
```

## Integration with CCW Workflows

The dashboard is fully integrated with CCW commands:

### Viewing Workflow Progress

```bash
# Start a workflow
ccw "Add user authentication"

# In another terminal, view progress
ccw view  # Shows execution progress in real-time
```

### Session Management from Dashboard

- Start new session: Click "New Session" button
- Resume paused session: Sessions list → Resume button
- View session logs: Click session name
- Complete session: Sessions list → Complete button

### Real-time Command Execution

- View active command chain execution
- Watch commands transition through Minimum Execution Units
- See error alerts and recovery options
- View command output logs

## Troubleshooting

### Port Already in Use

```bash
# Use a different port
ccw view --port 3457

# Or kill the existing server
lsof -i :3456   # Find process
kill -9 <pid>   # Kill it
ccw view        # Start fresh
```

### Dashboard Not Loading

```bash
# Try without browser
ccw view --no-browser

# Check server logs
tail -f ~/.ccw/logs/dashboard.log

# Verify network access
curl http://localhost:3456/api/health
```

### Workspace Path Not Found

```bash
# Use full absolute path
ccw view --path "$(pwd)"

# Or specify explicit path
ccw view --path ~/projects/my-project
```

## Related Commands

- **`/ccw`** - Main workflow orchestrator
- **`/workflow:session:list`** - List workflow sessions
- **`/workflow:session:resume`** - Resume paused session
- **`/memory:compact`** - Compact session memory for dashboard display

## Examples

### Basic Dashboard View

```bash
cd ~/projects/my-app
ccw view
# → Launches http://localhost:3456 in browser
```

### Network-Accessible Dashboard

```bash
# Allow remote access
ccw view --host 0.0.0.0 --port 3000
# → Dashboard accessible at http://machine-ip:3000
```

### Multiple Workspaces on Different Ports

```bash
# Terminal 1: Main project
ccw view --path ~/projects/main --port 3456

# Terminal 2: Side project
ccw view --path ~/projects/side --port 3457

# View both simultaneously
# → http://localhost:3456 (main)
# → http://localhost:3457 (side)
```

### Headless Dashboard

```bash
# Run dashboard without browser
ccw view --port 3000 --no-browser
echo "Dashboard available at http://localhost:3000"

# Share URL with team
# Can be proxied through nginx/port forwarding
```

### Environment-Based Configuration

```bash
# Script for CI/CD
export CCW_VIEW_HOST=0.0.0.0
export CCW_VIEW_PORT=8080
ccw view --path /workspace

# → Dashboard accessible on port 8080 on all interfaces
```

## Dashboard Pages

### Overview Page (`/`)
- Current workflow status
- Active sessions summary
- Recent commands
- System health indicators

### Sessions Page (`/sessions`)
- All sessions (grouped by type)
- Session details and metadata
- Session logs viewer
- Quick actions (resume/complete)

### Tasks Page (`/tasks`)
- Current TODO items
- Progress tracking
- Inline task editing
- Workflow history

### Workspace Page (`/workspace`)
- Current workspace info
- Available workspaces
- Workspace switcher
- Workspace settings

### Settings Page (`/settings`)
- Port configuration
- Theme preferences
- Auto-refresh settings
- Export settings

## Server Health Monitoring

The dashboard includes health monitoring:

```bash
# Check health endpoint
curl http://localhost:3456/api/health
# → { "status": "ok", "uptime": 12345 }

# Monitor metrics
curl http://localhost:3456/api/metrics
# → { "sessions": 3, "tasks": 15, "lastUpdate": "2025-01-29T10:30:00Z" }
```

## Advanced Usage

### Custom Port with Dynamic Discovery

```bash
# Find the next available port (assumes a find-available-port helper is on PATH)
available_port=$(find-available-port 3456)
ccw view --port $available_port

# Display in CI/CD
echo "Dashboard: http://localhost:$available_port"
```

### Dashboard Behind Proxy

```bash
# Configure nginx reverse proxy:
# Proxy http://proxy.example.com/dashboard → http://localhost:3456

ccw view --host 127.0.0.1 --port 3456

# Access via proxy
# http://proxy.example.com/dashboard
```

### Session Export from Dashboard

- View → Sessions → Export JSON
- Exports session metadata and progress
- Useful for record-keeping and reporting

## See Also

- **CCW Commands**: `/ccw` - Auto workflow orchestration
- **Session Management**: `/workflow:session:start`, `/workflow:session:list`
- **Task Tracking**: `TodoWrite` tool for programmatic task management
- **Workflow Status**: `/workflow:status` for CLI-based status view
---
name: action-plan-verify
description: Perform non-destructive cross-artifact consistency analysis between IMPL_PLAN.md and task JSONs with quality gate validation
argument-hint: "[optional: --session session-id]"
allowed-tools: Read(*), TodoWrite(*), Glob(*), Bash(*)
---

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Goal

Identify inconsistencies, duplications, ambiguities, and underspecified items between action-planning artifacts (`IMPL_PLAN.md`, `task.json`) and brainstorming artifacts (`role analysis documents`) before implementation. This command MUST run only after `/workflow:plan` has successfully produced a complete `IMPL_PLAN.md` and task JSON files.

## Operating Constraints

**STRICTLY READ-ONLY**: Do **not** modify any planning artifacts. Output a structured analysis report (the verification report saved in Step 7 is the only file written). Offer an optional remediation plan; the user must explicitly approve it before any follow-up editing commands run.

**Synthesis Authority**: The `role analysis documents` are **authoritative** for requirements and design decisions. Any conflict between IMPL_PLAN/tasks and synthesis is automatically CRITICAL and requires adjusting the plan/tasks, not reinterpreting the requirements.

## Execution Steps

### 1. Initialize Analysis Context

```bash
# Detect active workflow session
IF --session parameter provided:
    session_id = provided session
ELSE:
    CHECK: .workflow/.active-* marker files
    IF active_session EXISTS:
        session_id = get_active_session()
    ELSE:
        ERROR: "No active workflow session found. Use --session <session-id>"
        EXIT

# Derive absolute paths
session_dir = .workflow/WFS-{session}
brainstorm_dir = session_dir/.brainstorming
task_dir = session_dir/.task

# Validate required artifacts
SYNTHESIS = brainstorm_dir/role analysis documents
IMPL_PLAN = session_dir/IMPL_PLAN.md
TASK_FILES = Glob(task_dir/*.json)

# Abort if missing
IF NOT EXISTS(SYNTHESIS):
    ERROR: "role analysis documents not found. Run /workflow:brainstorm:synthesis first"
    EXIT

IF NOT EXISTS(IMPL_PLAN):
    ERROR: "IMPL_PLAN.md not found. Run /workflow:plan first"
    EXIT

IF TASK_FILES.count == 0:
    ERROR: "No task JSON files found. Run /workflow:plan first"
    EXIT
```

### 2. Load Artifacts (Progressive Disclosure)

Load only the minimal necessary context from each artifact:

**From workflow-session.json** (NEW - PRIMARY REFERENCE):
- Original user prompt/intent (project or description field)
- User's stated goals and objectives
- User's scope definition

**From role analysis documents**:
- Functional Requirements (IDs, descriptions, acceptance criteria)
- Non-Functional Requirements (IDs, targets)
- Business Requirements (IDs, success metrics)
- Key Architecture Decisions
- Risk factors and mitigation strategies
- Implementation Roadmap (high-level phases)

**From IMPL_PLAN.md**:
- Summary and objectives
- Context Analysis
- Implementation Strategy
- Task Breakdown Summary
- Success Criteria
- Brainstorming Artifacts References (if present)

**From task.json files**:
- Task IDs
- Titles and descriptions
- Status
- Dependencies (depends_on, blocks)
- Context (requirements, focus_paths, acceptance, artifacts)
- Flow control (pre_analysis, implementation_approach)
- Meta (complexity, priority, use_codex)

### 3. Build Semantic Models

Create internal representations (do not include raw artifacts in the output); a coverage-mapping sketch follows these lists:

**Requirements inventory**:
- Each functional/non-functional/business requirement with a stable ID
- Requirement text, acceptance criteria, priority

**Architecture decisions inventory**:
- ADRs from synthesis
- Technology choices
- Data model references

**Task coverage mapping**:
- Map each task to one or more requirements (by ID reference or keyword inference)
- Map each requirement to covering tasks

**Dependency graph**:
- Task-to-task dependencies (depends_on, blocks)
- Requirement-level dependencies (from synthesis)
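
A sketch of the coverage mapping, assuming requirement IDs such as FR-01 are cited verbatim in task text; the matching rule is deliberately simple and `coverageMap` is hypothetical:

```typescript
// Hypothetical sketch: map requirements to covering tasks by scanning for ID mentions.
type Task = { id: string; title: string; context: { requirements: string[] } };

function coverageMap(requirementIds: string[], tasks: Task[]): Map<string, string[]> {
  const covered = new Map<string, string[]>(
    requirementIds.map((r) => [r, []] as [string, string[]]),
  );
  for (const task of tasks) {
    const text = `${task.title} ${task.context.requirements.join(" ")}`;
    for (const reqId of requirementIds) {
      if (text.includes(reqId)) covered.get(reqId)!.push(task.id);
    }
  }
  return covered; // Entries with empty arrays are orphaned requirements (finding B).
}
```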

### 4. Detection Passes (Token-Efficient Analysis)

Focus on high-signal findings. Limit to 50 findings total; aggregate the remainder in an overflow summary.

#### A. User Intent Alignment (NEW - CRITICAL)

- **Goal Alignment**: IMPL_PLAN objectives match user's original intent
- **Scope Drift**: Plan covers user's stated scope without unauthorized expansion
- **Success Criteria Match**: Plan's success criteria reflect user's expectations
- **Intent Conflicts**: Tasks contradicting user's original objectives

#### B. Requirements Coverage Analysis

- **Orphaned Requirements**: Requirements in synthesis with zero associated tasks
- **Unmapped Tasks**: Tasks with no clear requirement linkage
- **NFR Coverage Gaps**: Non-functional requirements (performance, security, scalability) not reflected in tasks

#### C. Consistency Validation

- **Requirement Conflicts**: Tasks contradicting synthesis requirements
- **Architecture Drift**: IMPL_PLAN architecture not matching synthesis ADRs
- **Terminology Drift**: Same concept named differently across IMPL_PLAN and tasks
- **Data Model Inconsistency**: Tasks referencing entities/fields not in synthesis data model

#### D. Dependency Integrity

- **Circular Dependencies**: Task A depends on B, B depends on C, C depends on A
- **Missing Dependencies**: Task requires outputs from another task but no explicit dependency
- **Broken Dependencies**: Task depends on non-existent task ID
- **Logical Ordering Issues**: Implementation tasks scheduled before foundational setup without a dependency note (a dependency-check sketch follows this list)
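
Both the circular and broken checks fall out of one depth-first walk over depends_on edges; a minimal sketch with illustrative finding messages:

```typescript
// Hypothetical sketch: detect broken references and cycles in the task dependency graph.
type TaskNode = { id: string; depends_on: string[] };

function checkDependencies(tasks: TaskNode[]): string[] {
  const byId = new Map(tasks.map((t) => [t.id, t]));
  const findings: string[] = [];
  const state = new Map<string, "visiting" | "done">();

  function visit(id: string, path: string[]): void {
    if (state.get(id) === "done") return;
    if (state.get(id) === "visiting") {
      findings.push(`Circular dependency: ${[...path, id].join(" -> ")}`);
      return;
    }
    state.set(id, "visiting");
    for (const dep of byId.get(id)?.depends_on ?? []) {
      if (!byId.has(dep)) {
        findings.push(`Broken dependency: ${id} depends on ${dep} (non-existent)`);
      } else {
        visit(dep, [...path, id]);
      }
    }
    state.set(id, "done");
  }

  for (const t of tasks) visit(t.id, []);
  return findings;
}
```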

#### E. Synthesis Alignment

- **Priority Conflicts**: High-priority synthesis requirements mapped to low-priority tasks
- **Success Criteria Mismatch**: IMPL_PLAN success criteria not covering synthesis acceptance criteria
- **Risk Mitigation Gaps**: Critical risks in synthesis without corresponding mitigation tasks

#### F. Task Specification Quality

- **Ambiguous Focus Paths**: Tasks with vague or missing focus_paths
- **Underspecified Acceptance**: Tasks without clear acceptance criteria
- **Missing Artifacts References**: Tasks not referencing relevant brainstorming artifacts in context.artifacts
- **Weak Flow Control**: Tasks without clear implementation_approach or pre_analysis steps
- **Missing Target Files**: Tasks without flow_control.target_files specification

#### G. Duplication Detection

- **Overlapping Task Scope**: Multiple tasks with nearly identical descriptions
- **Redundant Requirements Coverage**: Same requirement covered by multiple tasks without clear partitioning

#### H. Feasibility Assessment

- **Complexity Misalignment**: Task marked "simple" but requires multiple file modifications
- **Resource Conflicts**: Parallel tasks requiring same resources/files
- **Skill Gap Risks**: Tasks requiring skills not in team capability assessment (from synthesis)

### 5. Severity Assignment

Use this heuristic to prioritize findings (a classifier sketch follows these lists):

- **CRITICAL**:
  - Violates user's original intent (goal misalignment, scope drift)
  - Violates synthesis authority (requirement conflict)
  - Core requirement with zero coverage
  - Circular dependencies
  - Broken dependencies

- **HIGH**:
  - NFR coverage gaps
  - Priority conflicts
  - Missing risk mitigation tasks
  - Ambiguous acceptance criteria

- **MEDIUM**:
  - Terminology drift
  - Missing artifacts references
  - Weak flow control
  - Logical ordering issues

- **LOW**:
  - Style/wording improvements
  - Minor redundancy not affecting execution
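
This rubric can be encoded as a lookup from finding category to severity; a minimal sketch, with category names assumed to match the detection passes above:

```typescript
// Hypothetical sketch: map finding categories from the detection passes to severities.
type Severity = "CRITICAL" | "HIGH" | "MEDIUM" | "LOW";

const SEVERITY_BY_CATEGORY: Record<string, Severity> = {
  intent_violation: "CRITICAL",
  requirement_conflict: "CRITICAL",
  zero_coverage: "CRITICAL",
  circular_dependency: "CRITICAL",
  broken_dependency: "CRITICAL",
  nfr_coverage_gap: "HIGH",
  priority_conflict: "HIGH",
  missing_risk_mitigation: "HIGH",
  ambiguous_acceptance: "HIGH",
  terminology_drift: "MEDIUM",
  missing_artifacts_reference: "MEDIUM",
  weak_flow_control: "MEDIUM",
  logical_ordering: "MEDIUM",
  style: "LOW",
  minor_redundancy: "LOW",
};

const severityOf = (category: string): Severity =>
  SEVERITY_BY_CATEGORY[category] ?? "MEDIUM"; // Unknown categories default to MEDIUM.
```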

### 6. Produce Compact Analysis Report

Output a Markdown report with the following structure (it is saved to disk in Step 7):

````markdown
## Action Plan Verification Report

**Session**: WFS-{session-id}
**Generated**: {timestamp}
**Artifacts Analyzed**: role analysis documents, IMPL_PLAN.md, {N} task files

---

### Executive Summary

- **Overall Risk Level**: CRITICAL | HIGH | MEDIUM | LOW
- **Recommendation**: BLOCK_EXECUTION | PROCEED_WITH_FIXES | PROCEED_WITH_CAUTION | PROCEED
- **Critical Issues**: {count}
- **High Issues**: {count}
- **Medium Issues**: {count}
- **Low Issues**: {count}

---

### Findings Summary

| ID | Category | Severity | Location(s) | Summary | Recommendation |
|----|----------|----------|-------------|---------|----------------|
| C1 | Coverage | CRITICAL | synthesis:FR-03 | Requirement "User auth" has zero task coverage | Add authentication implementation task |
| H1 | Consistency | HIGH | IMPL-1.2 vs synthesis:ADR-02 | Task uses REST while synthesis specifies GraphQL | Align task with ADR-02 decision |
| M1 | Specification | MEDIUM | IMPL-2.1 | Missing context.artifacts reference | Add @synthesis reference |
| L1 | Duplication | LOW | IMPL-3.1, IMPL-3.2 | Similar scope | Consider merging |

(Add one row per finding; generate stable IDs prefixed by the severity initial.)

---

### Requirements Coverage Analysis

| Requirement ID | Requirement Summary | Has Task? | Task IDs | Priority Match | Notes |
|----------------|---------------------|-----------|----------|----------------|-------|
| FR-01 | User authentication | Yes | IMPL-1.1, IMPL-1.2 | Match | Complete |
| FR-02 | Data export | Yes | IMPL-2.3 | Mismatch | High req → Med priority task |
| FR-03 | Profile management | No | - | - | **CRITICAL: Zero coverage** |
| NFR-01 | Response time <200ms | No | - | - | **HIGH: No performance tasks** |

**Coverage Metrics**:
- Functional Requirements: 85% (17/20 covered)
- Non-Functional Requirements: 40% (2/5 covered)
- Business Requirements: 100% (5/5 covered)

---

### Unmapped Tasks

| Task ID | Title | Issue | Recommendation |
|---------|-------|-------|----------------|
| IMPL-4.5 | Refactor utils | No requirement linkage | Link to technical debt or remove |

---

### Dependency Graph Issues

**Circular Dependencies**: None detected

**Broken Dependencies**:
- IMPL-2.3 depends on "IMPL-2.4" (non-existent)

**Logical Ordering Issues**:
- IMPL-5.1 (integration test) has no dependency on IMPL-1.* (implementation tasks)

---

### Synthesis Alignment Issues

| Issue Type | Synthesis Reference | IMPL_PLAN/Task | Impact | Recommendation |
|------------|---------------------|----------------|--------|----------------|
| Architecture Conflict | synthesis:ADR-01 (JWT auth) | IMPL_PLAN uses session cookies | HIGH | Update IMPL_PLAN to use JWT |
| Priority Mismatch | synthesis:FR-02 (High) | IMPL-2.3 (Medium) | MEDIUM | Elevate task priority |
| Missing Risk Mitigation | synthesis:Risk-03 (API rate limits) | No mitigation tasks | HIGH | Add rate limiting implementation task |

---

### Task Specification Quality Issues

**Missing Artifacts References**: 12 tasks lack context.artifacts
**Weak Flow Control**: 5 tasks lack implementation_approach
**Missing Target Files**: 8 tasks lack flow_control.target_files

**Sample Issues**:
- IMPL-1.2: No context.artifacts reference to synthesis
- IMPL-3.1: Missing flow_control.target_files specification
- IMPL-4.2: Vague focus_paths ["src/"] - needs refinement

---

### Feasibility Concerns

| Concern | Tasks Affected | Issue | Recommendation |
|---------|----------------|-------|----------------|
| Skill Gap | IMPL-6.1, IMPL-6.2 | Requires Kubernetes expertise not in team | Add training task or external consultant |
| Resource Conflict | IMPL-3.1, IMPL-3.2 | Both modify src/auth/service.ts in parallel | Add dependency or serialize |

---

### Metrics

- **Total Requirements**: 30 (20 functional, 5 non-functional, 5 business)
- **Total Tasks**: 25
- **Overall Coverage**: 77% (23/30 requirements with ≥1 task)
- **Critical Issues**: 2
- **High Issues**: 5
- **Medium Issues**: 8
- **Low Issues**: 3

---

### Next Actions

#### Action Recommendations

**If CRITICAL Issues Exist**:
- **BLOCK EXECUTION** - Resolve critical issues before proceeding
- Use TodoWrite to track all required fixes
- Fix broken dependencies and circular references

**If Only HIGH/MEDIUM/LOW Issues**:
- **PROCEED WITH CAUTION** - Fix high-priority issues first
- Use TodoWrite to systematically track and complete all improvements

#### TodoWrite-Based Remediation Workflow

**Report Location**: `.workflow/WFS-{session}/.process/ACTION_PLAN_VERIFICATION.md`

**Recommended Workflow**:
1. **Create TodoWrite Task List**: Extract all findings from the report
2. **Process by Priority**: CRITICAL → HIGH → MEDIUM → LOW
3. **Complete Each Fix**: Mark tasks as in_progress/completed as you work
4. **Validate Changes**: Verify each modification against requirements

**TodoWrite Task Structure Example**:
```markdown
Priority Order:
1. Fix coverage gaps (CRITICAL)
2. Resolve consistency conflicts (CRITICAL/HIGH)
3. Add missing specifications (MEDIUM)
4. Improve task quality (LOW)
```

**Notes**:
- TodoWrite provides real-time progress tracking
- Each finding becomes a trackable todo item
- User can monitor progress throughout remediation
- Architecture drift in IMPL_PLAN requires manual editing
````

### 7. Save Report and Execute TodoWrite-Based Remediation

**Save Analysis Report**:
```bash
report_path = ".workflow/WFS-{session}/.process/ACTION_PLAN_VERIFICATION.md"
Write(report_path, full_report_content)
```

**After Report Generation**:

1. **Extract Findings**: Parse all issues by severity
2. **Create TodoWrite Task List**: Convert findings to actionable todos
3. **Execute Fixes**: Process each todo systematically
4. **Update Task Files**: Apply modifications directly to task JSON files
5. **Update IMPL_PLAN**: Apply strategic changes if needed

At the end of the report, provide remediation guidance:

````markdown
### 🔧 Remediation Workflow

**Recommended Approach**:
1. **Initialize TodoWrite**: Create a comprehensive task list from all findings
2. **Process by Severity**: Start with CRITICAL, then HIGH, MEDIUM, LOW
3. **Apply Fixes Directly**: Modify task.json files and IMPL_PLAN.md as needed
4. **Track Progress**: Mark todos as completed after each fix

**TodoWrite Execution Pattern**:
```bash
# Step 1: Create task list from verification report
TodoWrite([
  { content: "Fix FR-03 coverage gap - add authentication task", status: "pending", activeForm: "Fixing FR-03 coverage gap" },
  { content: "Fix IMPL-1.2 consistency - align with ADR-02", status: "pending", activeForm: "Fixing IMPL-1.2 consistency" },
  { content: "Add context.artifacts to IMPL-1.2", status: "pending", activeForm: "Adding context.artifacts to IMPL-1.2" },
  # ... additional todos for each finding
])

# Step 2: Process each todo systematically
# Mark as in_progress when starting
# Apply fix using Read/Edit tools
# Mark as completed when done
# Move to next priority item
```

**File Modification Workflow**:
```bash
# For task JSON modifications:
1. Read(.workflow/WFS-{session}/.task/IMPL-X.Y.json)
2. Edit() to apply fixes
3. Mark todo as completed

# For IMPL_PLAN modifications:
1. Read(.workflow/WFS-{session}/IMPL_PLAN.md)
2. Edit() to apply strategic changes
3. Mark todo as completed
```

**Note**: All fixes execute immediately after user confirmation without additional commands.
````
---
|
||||
name: analyze-with-file
|
||||
description: Interactive collaborative analysis with documented discussions, CLI-assisted exploration, and evolving understanding
|
||||
argument-hint: "[-y|--yes] [-c|--continue] \"topic or question\""
|
||||
allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*)
|
||||
---
|
||||
|
||||
## Auto Mode
|
||||
|
||||
When `--yes` or `-y`: Auto-confirm exploration decisions, use recommended analysis angles.
|
||||
|
||||
# Workflow Analyze-With-File Command (/workflow:analyze-with-file)
|
||||
|
||||
## Overview
|
||||
|
||||
Interactive collaborative analysis workflow with **documented discussion process**. Records understanding evolution, facilitates multi-round Q&A, and uses CLI tools (Gemini/Codex) for deep exploration.
|
||||
|
||||
**Core workflow**: Topic → Explore → Discuss → Document → Refine → Conclude
|
||||
|
||||
**Key features**:
|
||||
- **discussion.md**: Timeline of discussions and understanding evolution
|
||||
- **Multi-round Q&A**: Iterative clarification with user
|
||||
- **CLI-assisted exploration**: Gemini/Codex for codebase and concept analysis
|
||||
- **Consolidated insights**: Synthesizes discussions into actionable conclusions
|
||||
- **Flexible continuation**: Resume analysis sessions to build on previous work
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
/workflow:analyze-with-file [FLAGS] <TOPIC_OR_QUESTION>
|
||||
|
||||
# Flags
|
||||
-y, --yes Skip confirmations, use recommended settings
|
||||
-c, --continue Continue existing session (auto-detected if exists)
|
||||
|
||||
# Arguments
|
||||
<topic-or-question> Analysis topic, question, or concept to explore (required)
|
||||
|
||||
# Examples
|
||||
/workflow:analyze-with-file "如何优化这个项目的认证架构"
|
||||
/workflow:analyze-with-file --continue "认证架构" # Continue existing session
|
||||
/workflow:analyze-with-file -y "性能瓶颈分析" # Auto mode
|
||||
```
|
||||
|
||||
## Execution Process
|
||||
|
||||
```
|
||||
Session Detection:
|
||||
├─ Check if analysis session exists for topic
|
||||
├─ EXISTS + discussion.md exists → Continue mode
|
||||
└─ NOT_FOUND → New session mode
|
||||
|
||||
Phase 1: Topic Understanding
|
||||
├─ Parse topic/question
|
||||
├─ Identify analysis dimensions (architecture, implementation, concept, etc.)
|
||||
├─ Initial scoping with user (AskUserQuestion)
|
||||
└─ Document initial understanding in discussion.md
|
||||
|
||||
Phase 2: CLI Exploration (Parallel)
|
||||
├─ Launch cli-explore-agent for codebase context
|
||||
├─ Use Gemini/Codex for deep analysis
|
||||
└─ Aggregate findings into exploration summary
|
||||
|
||||
Phase 3: Interactive Discussion (Multi-Round)
|
||||
├─ Present exploration findings
|
||||
├─ Facilitate Q&A with user (AskUserQuestion)
|
||||
├─ Capture user insights and requirements
|
||||
├─ Update discussion.md with each round
|
||||
└─ Repeat until user is satisfied or clarity achieved
|
||||
|
||||
Phase 4: Synthesis & Conclusion
|
||||
├─ Consolidate all insights
|
||||
├─ Update discussion.md with conclusions
|
||||
├─ Generate actionable recommendations
|
||||
└─ Optional: Create follow-up tasks or issues
|
||||
|
||||
Output:
|
||||
├─ .workflow/.analysis/{slug}-{date}/discussion.md (evolving document)
|
||||
├─ .workflow/.analysis/{slug}-{date}/explorations.json (CLI findings)
|
||||
└─ .workflow/.analysis/{slug}-{date}/conclusions.json (final synthesis)
|
||||
```
|
||||
|
||||
## Implementation
|
||||
|
||||
### Session Setup & Mode Detection
|
||||
|
||||
```javascript
|
||||
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()
|
||||
|
||||
const topicSlug = topic_or_question.toLowerCase().replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-').substring(0, 40)
|
||||
const dateStr = getUtc8ISOString().substring(0, 10)
|
||||
|
||||
const sessionId = `ANL-${topicSlug}-${dateStr}`
|
||||
const sessionFolder = `.workflow/.analysis/${sessionId}`
|
||||
const discussionPath = `${sessionFolder}/discussion.md`
|
||||
const explorationsPath = `${sessionFolder}/explorations.json`
|
||||
const conclusionsPath = `${sessionFolder}/conclusions.json`
|
||||
|
||||
// Auto-detect mode
|
||||
const sessionExists = fs.existsSync(sessionFolder)
|
||||
const hasDiscussion = sessionExists && fs.existsSync(discussionPath)
|
||||
const forcesContinue = $ARGUMENTS.includes('--continue') || $ARGUMENTS.includes('-c')
|
||||
|
||||
const mode = (hasDiscussion || forcesContinue) ? 'continue' : 'new'
|
||||
|
||||
if (!sessionExists) {
|
||||
bash(`mkdir -p ${sessionFolder}`)
|
||||
}
|
||||
```
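
As a quick illustration (hypothetical date and topic), the slug rule above preserves Chinese characters, so a run might derive:

```javascript
// topicSlug     → "性能瓶颈分析"   (the \u4e00-\u9fa5 range keeps CJK characters)
// sessionId     → "ANL-性能瓶颈分析-2025-01-25"
// sessionFolder → ".workflow/.analysis/ANL-性能瓶颈分析-2025-01-25"
```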

---

### Phase 1: Topic Understanding

**Step 1.1: Parse Topic & Identify Dimensions**

```javascript
// Analyze topic to determine analysis dimensions
const ANALYSIS_DIMENSIONS = {
  architecture: ['架构', 'architecture', 'design', 'structure', '设计'],
  implementation: ['实现', 'implement', 'code', 'coding', '代码'],
  performance: ['性能', 'performance', 'optimize', 'bottleneck', '优化'],
  security: ['安全', 'security', 'auth', 'permission', '权限'],
  concept: ['概念', 'concept', 'theory', 'principle', '原理'],
  comparison: ['比较', 'compare', 'vs', 'difference', '区别'],
  decision: ['决策', 'decision', 'choice', 'tradeoff', '选择']
}

function identifyDimensions(topic) {
  const text = topic.toLowerCase()
  const matched = []

  for (const [dimension, keywords] of Object.entries(ANALYSIS_DIMENSIONS)) {
    if (keywords.some(k => text.includes(k))) {
      matched.push(dimension)
    }
  }

  return matched.length > 0 ? matched : ['general']
}

const dimensions = identifyDimensions(topic_or_question)
```
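
For example, the first usage topic above hits two keyword families, while a topic with no keyword hit falls back to `general`:

```javascript
identifyDimensions('如何优化这个项目的认证架构')
// '架构' → architecture, '优化' → performance
// → ['architecture', 'performance']

identifyDimensions('闲聊')
// no keyword matches → ['general']
```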

**Step 1.2: Initial Scoping (New Session Only)**

```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (mode === 'new' && !autoYes) {
  // Ask user to scope the analysis
  AskUserQuestion({
    questions: [
      {
        question: `分析范围: "${topic_or_question}"\n\n您想重点关注哪些方面?`,
        header: "Focus",
        multiSelect: true,
        options: [
          { label: "代码实现", description: "分析现有代码实现" },
          { label: "架构设计", description: "架构层面的分析" },
          { label: "最佳实践", description: "行业最佳实践对比" },
          { label: "问题诊断", description: "识别潜在问题" }
        ]
      },
      {
        question: "分析深度?",
        header: "Depth",
        multiSelect: false,
        options: [
          { label: "Quick Overview", description: "快速概览 (10-15分钟)" },
          { label: "Standard Analysis", description: "标准分析 (30-60分钟)" },
          { label: "Deep Dive", description: "深度分析 (1-2小时)" }
        ]
      }
    ]
  })
}
```

**Step 1.3: Create/Update discussion.md**

For a new session:
```markdown
# Analysis Discussion

**Session ID**: ${sessionId}
**Topic**: ${topic_or_question}
**Started**: ${getUtc8ISOString()}
**Dimensions**: ${dimensions.join(', ')}

---

## User Context

**Focus Areas**: ${userFocusAreas.join(', ')}
**Analysis Depth**: ${analysisDepth}

---

## Discussion Timeline

### Round 1 - Initial Understanding (${timestamp})

#### Topic Analysis

Based on the topic "${topic_or_question}":

- **Primary dimensions**: ${dimensions.join(', ')}
- **Initial scope**: ${initialScope}
- **Key questions to explore**:
  - ${question1}
  - ${question2}
  - ${question3}

#### Next Steps

- Launch CLI exploration for codebase context
- Gather external insights via Gemini
- Prepare discussion points for user

---

## Current Understanding

${initialUnderstanding}
```

For a continued session, append:
```markdown
### Round ${n} - Continuation (${timestamp})

#### Previous Context

Resuming analysis based on prior discussion.

#### New Focus

${newFocusFromUser}
```

---

### Phase 2: CLI Exploration

**Step 2.1: Launch Parallel Explorations**

```javascript
const explorationPromises = []

// CLI Explore Agent for codebase
if (dimensions.includes('implementation') || dimensions.includes('architecture')) {
  explorationPromises.push(
    Task(
      subagent_type="cli-explore-agent",
      run_in_background=false,
      description=`Explore codebase: ${topicSlug}`,
      prompt=`
## Analysis Context
Topic: ${topic_or_question}
Dimensions: ${dimensions.join(', ')}
Session: ${sessionFolder}

## MANDATORY FIRST STEPS
1. Run: ccw tool exec get_modules_by_depth '{}'
2. Execute relevant searches based on topic keywords
3. Read: .workflow/project-tech.json (if exists)

## Exploration Focus
${dimensions.map(d => `- ${d}: Identify relevant code patterns and structures`).join('\n')}

## Output
Write findings to: ${sessionFolder}/exploration-codebase.json

Schema:
{
  "relevant_files": [{path, relevance, rationale}],
  "patterns": [],
  "key_findings": [],
  "questions_for_user": [],
  "_metadata": { "exploration_type": "codebase", "timestamp": "..." }
}
`
    )
  )
}

// Gemini CLI for deep analysis
explorationPromises.push(
  Bash({
    command: `ccw cli -p "
PURPOSE: Analyze topic '${topic_or_question}' from ${dimensions.join(', ')} perspectives
Success criteria: Actionable insights with clear reasoning

TASK:
• Identify key considerations for this topic
• Analyze common patterns and anti-patterns
• Highlight potential issues or opportunities
• Generate discussion points for user clarification

MODE: analysis

CONTEXT: @**/* | Topic: ${topic_or_question}

EXPECTED:
- Structured analysis with clear sections
- Specific insights tied to evidence
- Questions to deepen understanding
- Recommendations with rationale

CONSTRAINTS: Focus on ${dimensions.join(', ')}
" --tool gemini --mode analysis`,
    run_in_background: true
  })
)
```

**Step 2.2: Aggregate Findings**

```javascript
// After explorations complete, aggregate into explorations.json
const explorations = {
  session_id: sessionId,
  timestamp: getUtc8ISOString(),
  topic: topic_or_question,
  dimensions: dimensions,
  sources: [
    { type: "codebase", file: "exploration-codebase.json" },
    { type: "gemini", summary: geminiOutput }
  ],
  key_findings: [...],
  discussion_points: [...],
  open_questions: [...]
}

Write(explorationsPath, JSON.stringify(explorations, null, 2))
```

**Step 2.3: Update discussion.md**

```markdown
#### Exploration Results (${timestamp})

**Sources Analyzed**:
${sources.map(s => `- ${s.type}: ${s.summary}`).join('\n')}

**Key Findings**:
${keyFindings.map((f, i) => `${i+1}. ${f}`).join('\n')}

**Points for Discussion**:
${discussionPoints.map((p, i) => `${i+1}. ${p}`).join('\n')}

**Open Questions**:
${openQuestions.map(q => `- ${q}`).join('\n')}
```

---

### Phase 3: Interactive Discussion (Multi-Round)

**Step 3.1: Present Findings & Gather Feedback**

```javascript
// Maximum discussion rounds
const MAX_ROUNDS = 5
let roundNumber = 1
let discussionComplete = false

while (!discussionComplete && roundNumber <= MAX_ROUNDS) {
  // Display current findings
  console.log(`
## Discussion Round ${roundNumber}

${currentFindings}

### Key Points for Your Input
${discussionPoints.map((p, i) => `${i+1}. ${p}`).join('\n')}
`)

  // Gather user input
  const userResponse = AskUserQuestion({
    questions: [
      {
        question: "对以上分析有什么看法或补充?",
        header: "Feedback",
        multiSelect: false,
        options: [
          { label: "同意,继续深入", description: "分析方向正确,继续探索" },
          { label: "需要调整方向", description: "我有不同的理解或重点" },
          { label: "分析完成", description: "已获得足够信息" },
          { label: "有具体问题", description: "我想问一些具体问题" }
        ]
      }
    ]
  })

  // Process user response
  switch (userResponse.feedback) {
    case "同意,继续深入":
      // Deepen analysis in current direction
      await deepenAnalysis()
      break
    case "需要调整方向": {
      // Get user's adjusted focus
      const adjustment = AskUserQuestion({
        questions: [{
          question: "请说明您希望调整的方向或重点:",
          header: "Direction",
          multiSelect: false,
          options: [
            { label: "更多代码细节", description: "深入代码实现" },
            { label: "更多架构视角", description: "关注整体设计" },
            { label: "更多实践对比", description: "对比最佳实践" }
          ]
        }]
      })
      await adjustAnalysisDirection(adjustment)
      break
    }
    case "分析完成":
      discussionComplete = true
      break
    case "有具体问题":
      // Let user ask specific questions, then answer
      await handleUserQuestions()
      break
  }

  // Update discussion.md with this round
  updateDiscussionDocument(roundNumber, userResponse, findings)
  roundNumber++
}
```
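
The helpers referenced above (`deepenAnalysis`, `adjustAnalysisDirection`, `handleUserQuestions`, `updateDiscussionDocument`) are left to the implementer. A minimal sketch of the append-only document update, assuming Node's `fs` is available in this pseudo-JS context:

```javascript
// Sketch (assumption): append one round section to discussion.md
function updateDiscussionDocument(round, userResponse, findings) {
  const section = [
    `### Round ${round} - Discussion (${getUtc8ISOString()})`,
    '',
    '#### User Input',
    '',
    userResponse.feedback,
    '',
    '#### Updated Understanding',
    '',
    ...findings.map(f => `- ${f}`),
    '',
    '---',
    ''
  ].join('\n')
  fs.appendFileSync(discussionPath, section)
}
```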

**Step 3.2: Document Each Round**

Append to discussion.md:
```markdown
### Round ${n} - Discussion (${timestamp})

#### User Input

${userInputSummary}

${userResponse === 'adjustment' ? `
**Direction Adjustment**: ${adjustmentDetails}
` : ''}

${userResponse === 'questions' ? `
**User Questions**:
${userQuestions.map((q, i) => `${i+1}. ${q}`).join('\n')}

**Answers**:
${answers.map((a, i) => `${i+1}. ${a}`).join('\n')}
` : ''}

#### Updated Understanding

Based on user feedback:
- ${insight1}
- ${insight2}

#### Corrected Assumptions

${corrections.length > 0 ? corrections.map(c => `
- ~~${c.wrong}~~ → ${c.corrected}
  - Reason: ${c.reason}
`).join('\n') : 'None'}

#### New Insights

${newInsights.map(i => `- ${i}`).join('\n')}
```

---

### Phase 4: Synthesis & Conclusion

**Step 4.1: Consolidate Insights**

```javascript
const conclusions = {
  session_id: sessionId,
  topic: topic_or_question,
  completed: getUtc8ISOString(),
  total_rounds: roundNumber - 1, // roundNumber was incremented after the final round

  summary: "...",

  key_conclusions: [
    { point: "...", evidence: "...", confidence: "high|medium|low" }
  ],

  recommendations: [
    { action: "...", rationale: "...", priority: "high|medium|low" }
  ],

  open_questions: [...],

  follow_up_suggestions: [
    { type: "issue", summary: "..." },
    { type: "task", summary: "..." }
  ]
}

Write(conclusionsPath, JSON.stringify(conclusions, null, 2))
```

**Step 4.2: Final discussion.md Update**

```markdown
---

## Conclusions (${timestamp})

### Summary

${summaryParagraph}

### Key Conclusions

${conclusions.key_conclusions.map((c, i) => `
${i+1}. **${c.point}** (Confidence: ${c.confidence})
   - Evidence: ${c.evidence}
`).join('\n')}

### Recommendations

${conclusions.recommendations.map((r, i) => `
${i+1}. **${r.action}** (Priority: ${r.priority})
   - Rationale: ${r.rationale}
`).join('\n')}

### Remaining Questions

${conclusions.open_questions.map(q => `- ${q}`).join('\n')}

---

## Current Understanding (Final)

### What We Established

${establishedPoints.map(p => `- ${p}`).join('\n')}

### What Was Clarified/Corrected

${corrections.map(c => `- ~~${c.original}~~ → ${c.corrected}`).join('\n')}

### Key Insights

${keyInsights.map(i => `- ${i}`).join('\n')}

---

## Session Statistics

- **Total Rounds**: ${totalRounds}
- **Duration**: ${duration}
- **Sources Used**: ${sources.join(', ')}
- **Artifacts Generated**: discussion.md, explorations.json, conclusions.json
```

**Step 4.3: Post-Completion Options**

```javascript
const selection = AskUserQuestion({
  questions: [{
    question: "分析完成。是否需要后续操作?",
    header: "Next Steps",
    multiSelect: true,
    options: [
      { label: "创建Issue", description: "将结论转为可执行的Issue" },
      { label: "生成任务", description: "创建实施任务" },
      { label: "导出报告", description: "生成独立的分析报告" },
      { label: "完成", description: "不需要后续操作" }
    ]
  }]
})

// Handle selections
if (selection.includes("创建Issue")) {
  SlashCommand("/issue:new", `${topic_or_question} - 分析结论实施`)
}
if (selection.includes("生成任务")) {
  SlashCommand("/workflow:lite-plan", `实施分析结论: ${summary}`)
}
if (selection.includes("导出报告")) {
  exportAnalysisReport(sessionFolder)
}
```

---

## Session Folder Structure

```
.workflow/.analysis/ANL-{slug}-{date}/
├── discussion.md          # Evolution of understanding & discussions
├── explorations.json      # CLI exploration findings
├── conclusions.json       # Final synthesis
└── exploration-*.json     # Individual exploration results (optional)
```

## Discussion Document Template

```markdown
# Analysis Discussion

**Session ID**: ANL-xxx-2025-01-25
**Topic**: [topic or question]
**Started**: 2025-01-25T10:00:00+08:00
**Dimensions**: [architecture, implementation, ...]

---

## User Context

**Focus Areas**: [user-selected focus]
**Analysis Depth**: [quick|standard|deep]

---

## Discussion Timeline

### Round 1 - Initial Understanding (2025-01-25 10:00)

#### Topic Analysis
...

#### Exploration Results
...

### Round 2 - Discussion (2025-01-25 10:15)

#### User Input
...

#### Updated Understanding
...

#### Corrected Assumptions
- ~~[wrong]~~ → [corrected]

### Round 3 - Deep Dive (2025-01-25 10:30)
...

---

## Conclusions (2025-01-25 11:00)

### Summary
...

### Key Conclusions
...

### Recommendations
...

---

## Current Understanding (Final)

### What We Established
- [confirmed points]

### What Was Clarified/Corrected
- ~~[original assumption]~~ → [corrected understanding]

### Key Insights
- [insights gained]

---

## Session Statistics

- **Total Rounds**: 3
- **Duration**: 1 hour
- **Sources Used**: codebase exploration, Gemini analysis
- **Artifacts Generated**: discussion.md, explorations.json, conclusions.json
```

## Iteration Flow

```
First Call (/workflow:analyze-with-file "topic"):
├─ No session exists → New mode
├─ Identify analysis dimensions
├─ Scope with user (unless --yes)
├─ Create discussion.md with initial understanding
├─ Launch CLI explorations
└─ Enter discussion loop

Continue Call (/workflow:analyze-with-file --continue "topic"):
├─ Session exists → Continue mode
├─ Load discussion.md
├─ Resume from last round
└─ Continue discussion loop

Discussion Loop:
├─ Present current findings
├─ Gather user feedback (AskUserQuestion)
├─ Process response:
│  ├─ Agree → Deepen analysis
│  ├─ Adjust → Change direction
│  ├─ Question → Answer then continue
│  └─ Complete → Exit loop
├─ Update discussion.md
└─ Repeat until complete or max rounds

Completion:
├─ Generate conclusions.json
├─ Update discussion.md with final synthesis
└─ Offer follow-up options (issue, task, report)
```

## CLI Integration Points

### 1. Codebase Exploration (cli-explore-agent)

**Purpose**: Gather relevant code context

**When**: Topic involves implementation or architecture analysis

### 2. Gemini Deep Analysis

**Purpose**: Conceptual analysis, pattern identification, best practices

**Prompt Pattern**:
```
PURPOSE: Analyze topic + identify insights
TASK: Explore dimensions + generate discussion points
CONTEXT: Codebase + topic
EXPECTED: Structured analysis + questions
```

### 3. Follow-up CLI Calls

**Purpose**: Deepen specific areas based on user feedback

**Dynamic invocation** based on discussion direction

## Consolidation Rules

When updating "Current Understanding":

1. **Promote confirmed insights**: Move validated findings to "What We Established"
2. **Track corrections**: Keep important wrong→right transformations
3. **Focus on current state**: What do we know NOW?
4. **Avoid timeline repetition**: Don't copy discussion details
5. **Preserve key learnings**: Keep insights valuable for future reference

**Bad (cluttered)**:
```markdown
## Current Understanding

In round 1 we discussed X, then in round 2 the user said Y, and we explored Z...
```

**Good (consolidated)**:
```markdown
## Current Understanding

### What We Established
- The authentication flow uses JWT with refresh tokens
- Rate limiting is implemented at the API gateway level

### What Was Clarified
- ~~Assumed Redis for sessions~~ → Actually uses database-backed sessions

### Key Insights
- Current architecture supports horizontal scaling
- Security audit recommended before production
```

## Error Handling

| Situation | Action |
|-----------|--------|
| CLI exploration fails | Continue with available context, note limitation |
| User timeout in discussion | Save state, show resume command |
| Max rounds reached | Force synthesis, offer continuation option |
| No relevant findings | Broaden search, ask user for clarification |
| Session folder conflict | Append timestamp suffix |
| Gemini unavailable | Fall back to Codex or manual analysis |
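
A hedged sketch of the last fallback. It assumes `ccw cli` accepts `--tool codex` symmetrically to the `--tool gemini` call shown earlier; that flag is an assumption, not confirmed here:

```javascript
// Sketch (assumption): retry the same prompt with Codex when Gemini fails,
// noting the limitation in discussion.md before falling back.
async function runCliAnalysis(prompt) {
  try {
    return await Bash({ command: `ccw cli -p "${prompt}" --tool gemini --mode analysis` })
  } catch (err) {
    fs.appendFileSync(discussionPath, `\n> Note: Gemini unavailable (${err.message}); falling back to Codex.\n`)
    return await Bash({ command: `ccw cli -p "${prompt}" --tool codex --mode analysis` })
  }
}
```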

## Usage Recommendations

Use `/workflow:analyze-with-file` when:
- Exploring a complex topic collaboratively
- Need documented discussion trail
- Decision-making requires multiple perspectives
- Want to iterate on understanding with user input
- Building shared understanding before implementation

Use `/workflow:debug-with-file` when:
- Diagnosing specific bugs
- Need hypothesis-driven investigation
- Focus on evidence and verification

Use `/workflow:lite-plan` when:
- Ready to implement (past analysis phase)
- Need structured task breakdown
- Focus on execution planning

.claude/commands/workflow/brainstorm-with-file.md (new file, 1153 lines; diff suppressed because it is too large)
@@ -1,585 +0,0 @@

---
name: api-designer
description: Generate or update api-designer/analysis.md addressing guidance-specification discussion points for API design perspective
argument-hint: "optional topic - uses existing framework if available"
allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*)
---

## 🔌 **API Designer Analysis Generator**

### Purpose
**Specialized command for generating api-designer/analysis.md** that addresses guidance-specification.md discussion points from a backend API design perspective. Creates or updates the role-specific analysis with framework references.

### Core Function
- **Framework-based Analysis**: Address each discussion point in guidance-specification.md
- **API Design Focus**: RESTful/GraphQL API design, endpoint structure, and contract definition
- **Update Mechanism**: Create new or update existing analysis.md
- **Agent Delegation**: Use conceptual-planning-agent for analysis generation

### Analysis Scope
- **API Architecture**: RESTful/GraphQL design patterns and best practices
- **Endpoint Design**: Resource modeling, URL structure, and HTTP method selection
- **Data Contracts**: Request/response schemas, validation rules, and data transformation
- **API Documentation**: OpenAPI/Swagger specifications and developer experience

### Role Boundaries & Responsibilities

#### **What This Role OWNS (API Contract Within Architectural Framework)**
- **API Contract Definition**: Specific endpoint paths, HTTP methods, and status codes
- **Resource Modeling**: Mapping domain entities to RESTful resources or GraphQL types
- **Request/Response Schemas**: Detailed data contracts, validation rules, and error formats
- **API Versioning Strategy**: Version management, deprecation policies, and migration paths
- **Developer Experience**: API documentation (OpenAPI/Swagger), code examples, and SDKs

#### **What This Role DOES NOT Own (Defers to Other Roles)**
- **System Architecture Decisions**: Microservices vs monolithic, overall communication patterns → Defers to **System Architect**
- **Canonical Data Model**: Underlying data schemas and entity relationships → Defers to **Data Architect**
- **UI/Frontend Integration**: How clients consume the API → Defers to **UI Designer**

#### **Handoff Points**
- **FROM System Architect**: Receives architectural constraints (REST vs GraphQL, sync vs async) that define the design space
- **FROM Data Architect**: Receives the canonical data model and translates it into public API data contracts (as a projection/view)
- **TO Frontend Teams**: Provides complete API specifications, documentation, and integration guides

## ⚙️ **Execution Protocol**

### Phase 1: Session & Framework Detection
```bash
# Check active session and framework
CHECK: .workflow/.active-* marker files
IF active_session EXISTS:
  session_id = get_active_session()
  brainstorm_dir = .workflow/WFS-{session}/.brainstorming/

  CHECK: brainstorm_dir/guidance-specification.md
  IF EXISTS:
    framework_mode = true
    load_framework = true
  ELSE:
    IF topic_provided:
      framework_mode = false  # Create analysis without framework
    ELSE:
      ERROR: "No framework found and no topic provided"
```
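
The block above is pseudocode. A minimal executable sketch of the same detection, assuming the `.workflow/.active-{session-id}` marker-file convention used elsewhere in this workflow:

```bash
#!/usr/bin/env bash
# Sketch (assumption): marker files are named .workflow/.active-<session-id>
marker=$(ls .workflow/.active-* 2>/dev/null | head -n 1)
if [ -n "$marker" ]; then
  session_id="${marker##*/.active-}"
  brainstorm_dir=".workflow/WFS-${session_id}/.brainstorming"
  if [ -f "${brainstorm_dir}/guidance-specification.md" ]; then
    framework_mode=true
  elif [ -n "${topic:-}" ]; then
    framework_mode=false   # create analysis without a framework
  else
    echo "No framework found and no topic provided" >&2
    exit 1
  fi
fi
```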

### Phase 2: Analysis Mode Detection
```bash
# Check existing analysis
CHECK: brainstorm_dir/api-designer/analysis.md
IF EXISTS:
  SHOW existing analysis summary
  ASK: "Analysis exists. Do you want to:"
  OPTIONS:
    1. "Update with new insights" → Update existing
    2. "Replace completely" → Generate new
    3. "Cancel" → Exit without changes
ELSE:
  CREATE new analysis
```

### Phase 3: Agent Task Generation
**Framework-Based Analysis** (when guidance-specification.md exists):
```bash
Task(subagent_type="conceptual-planning-agent",
     prompt="Generate API designer analysis addressing topic framework

## Framework Integration Required
**MANDATORY**: Load and address guidance-specification.md discussion points
**Framework Reference**: @{session.brainstorm_dir}/guidance-specification.md
**Output Location**: {session.brainstorm_dir}/api-designer/analysis.md

## Analysis Requirements
1. **Load Topic Framework**: Read guidance-specification.md completely
2. **Address Each Discussion Point**: Respond to all 5 framework sections from an API design perspective
3. **Include Framework Reference**: Start analysis.md with @../guidance-specification.md
4. **API Design Focus**: Emphasize endpoint structure, data contracts, versioning strategies
5. **Structured Response**: Use the framework structure to organize the analysis

## Framework Sections to Address
- Core Requirements (from API design perspective)
- Technical Considerations (detailed API architecture analysis)
- User Experience Factors (developer experience and API usability)
- Implementation Challenges (API design risks and solutions)
- Success Metrics (API performance metrics and adoption tracking)

## Output Structure Required
```markdown
# API Designer Analysis: [Topic]

**Framework Reference**: @../guidance-specification.md
**Role Focus**: Backend API Design and Contract Definition

## Core Requirements Analysis
[Address framework requirements from API design perspective]

## Technical Considerations
[Detailed API architecture and endpoint design analysis]

## Developer Experience Factors
[API usability, documentation, and integration ease]

## Implementation Challenges
[API design risks and mitigation strategies]

## Success Metrics
[API performance metrics, adoption rates, and developer satisfaction]

## API Design-Specific Recommendations
[Detailed API design recommendations and best practices]
```",
     description="Generate API designer framework-based analysis")
```

### Phase 4: Update Mechanism
**Analysis Update Process**:
```bash
# For existing analysis updates
IF update_mode = "incremental":
  Task(subagent_type="conceptual-planning-agent",
       prompt="Update existing API designer analysis

## Current Analysis Context
**Existing Analysis**: @{session.brainstorm_dir}/api-designer/analysis.md
**Framework Reference**: @{session.brainstorm_dir}/guidance-specification.md

## Update Requirements
1. **Preserve Structure**: Maintain the existing analysis structure
2. **Add New Insights**: Integrate new API design insights and recommendations
3. **Framework Alignment**: Ensure continued alignment with the topic framework
4. **API Updates**: Add new endpoint patterns, versioning strategies, documentation improvements
5. **Maintain References**: Keep the @../guidance-specification.md reference

## Update Instructions
- Read the existing analysis completely
- Identify areas for enhancement or new insights
- Add API design depth while preserving the original structure
- Update recommendations with new API design patterns and approaches
- Keep addressing the framework discussion points",
       description="Update API designer analysis incrementally")
```

## Document Structure

### Output Files
```
.workflow/WFS-[topic]/.brainstorming/
├── guidance-specification.md    # Input: Framework (if exists)
└── api-designer/
    └── analysis.md              # ★ OUTPUT: Framework-based analysis
```

### Analysis Structure
**Required Elements**:
- **Framework Reference**: @../guidance-specification.md (if framework exists)
- **Role Focus**: Backend API Design and Contract Definition perspective
- **5 Framework Sections**: Address each framework discussion point
- **API Design Recommendations**: Endpoint-specific insights and solutions

## ⚡ **Two-Step Execution Flow**

### ⚠️ Session Management - FIRST STEP
Session detection and selection:
```bash
# Check for active sessions
active_sessions=$(find .workflow -name ".active-*" 2>/dev/null)
if [ multiple_sessions ]; then
  prompt_user_to_select_session()
else
  use_existing_or_create_new()
fi
```

### Step 1: Context Gathering Phase
**API Designer Perspective Questioning**

Before agent assignment, gather comprehensive API design context:

#### 📋 Role-Specific Questions
1. **API Type & Architecture**
   - RESTful, GraphQL, or hybrid API approach?
   - Synchronous vs asynchronous communication patterns?
   - Real-time requirements (WebSocket, Server-Sent Events)?

2. **Resource Modeling & Endpoints**
   - What are the core domain resources/entities?
   - Expected CRUD operations for each resource?
   - Complex query requirements (filtering, sorting, pagination)?

3. **Data Contracts & Validation**
   - Request/response data format requirements (JSON, XML, Protocol Buffers)?
   - Input validation and sanitization requirements?
   - Data transformation and mapping needs?

4. **API Management & Governance**
   - API versioning strategy requirements?
   - Authentication and authorization mechanisms?
   - Rate limiting and throttling requirements?
   - API documentation and developer portal needs?

5. **Integration & Compatibility**
   - Client platforms consuming the API (web, mobile, third-party)?
   - Backward compatibility requirements?
   - External API integrations needed?

#### Context Validation
- **Minimum Response**: Each answer must be ≥50 characters
- **Re-prompting**: Insufficient detail triggers follow-up questions
- **Context Storage**: Save responses to `.brainstorming/api-designer-context.md`

### Step 2: Agent Assignment with Flow Control
**Dedicated Agent Execution**

```bash
Task(conceptual-planning-agent): "
[FLOW_CONTROL]

Execute dedicated api-designer conceptual analysis for: {topic}

ASSIGNED_ROLE: api-designer
OUTPUT_LOCATION: .brainstorming/api-designer/
USER_CONTEXT: {validated_responses_from_context_gathering}

Flow Control Steps:
[
  {
    \"step\": \"load_role_template\",
    \"action\": \"Load api-designer planning template\",
    \"command\": \"bash($(cat ~/.claude/workflows/cli-templates/planning-roles/api-designer.md))\",
    \"output_to\": \"role_template\"
  }
]

Conceptual Analysis Requirements:
- Apply the api-designer perspective to topic analysis
- Focus on endpoint design, data contracts, and API governance
- Use the loaded role template framework for analysis structure
- Generate role-specific deliverables in the designated output location
- Address all user context from the questioning phase

Deliverables:
- analysis.md: Main API design analysis
- api-specification.md: Detailed endpoint specifications
- data-contracts.md: Request/response schemas and validation rules
- api-documentation.md: API documentation strategy and templates

Embody api-designer role expertise for comprehensive conceptual planning."
```

### Progress Tracking
TodoWrite tracking for the two-step process:
```json
[
  {"content": "Gather API designer context through role-specific questioning", "status": "in_progress", "activeForm": "Gathering context"},
  {"content": "Validate context responses and save to api-designer-context.md", "status": "pending", "activeForm": "Validating context"},
  {"content": "Load api-designer planning template via flow control", "status": "pending", "activeForm": "Loading template"},
  {"content": "Execute dedicated conceptual-planning-agent for api-designer role", "status": "pending", "activeForm": "Executing agent"}
]
```

## 📊 **Output Specification**

### Output Location
```
.workflow/WFS-{topic-slug}/.brainstorming/api-designer/
├── analysis.md              # Primary API design analysis
├── api-specification.md     # Detailed endpoint specifications (OpenAPI/Swagger)
├── data-contracts.md        # Request/response schemas and validation rules
├── versioning-strategy.md   # API versioning and backward compatibility plan
└── developer-guide.md       # API usage documentation and integration examples
```

### Document Templates

#### analysis.md Structure
```markdown
# API Design Analysis: {Topic}
*Generated: {timestamp}*

## Executive Summary
[Key API design findings and recommendations overview]

## API Architecture Overview
### API Type Selection (REST/GraphQL/Hybrid)
### Communication Patterns
### Authentication & Authorization Strategy

## Resource Modeling
### Core Domain Resources
### Resource Relationships
### URL Structure and Naming Conventions

## Endpoint Design
### Resource Endpoints
- GET /api/v1/resources
- POST /api/v1/resources
- GET /api/v1/resources/{id}
- PUT /api/v1/resources/{id}
- DELETE /api/v1/resources/{id}

### Query Parameters
- Filtering: ?filter[field]=value
- Sorting: ?sort=field,-field2
- Pagination: ?page=1&limit=20

### HTTP Methods and Status Codes
- Success responses (2xx)
- Client errors (4xx)
- Server errors (5xx)

## Data Contracts
### Request Schemas
[JSON Schema or OpenAPI definitions]

### Response Schemas
[JSON Schema or OpenAPI definitions]

### Validation Rules
- Required fields
- Data types and formats
- Business logic constraints

## API Versioning Strategy
### Versioning Approach (URL/Header/Accept)
### Version Lifecycle Management
### Deprecation Policy
### Migration Paths

## Security & Governance
### Authentication Mechanisms
- OAuth 2.0 / JWT / API Keys
### Authorization Patterns
- RBAC / ABAC / Resource-based
### Rate Limiting & Throttling
### CORS and Security Headers

## Error Handling
### Standard Error Response Format
```json
{
  "error": {
    "code": "ERROR_CODE",
    "message": "Human-readable error message",
    "details": [],
    "trace_id": "uuid"
  }
}
```

### Error Code Taxonomy
### Validation Error Responses

## API Documentation
### OpenAPI/Swagger Specification
### Developer Portal Requirements
### Code Examples and SDKs
### Changelog and Migration Guides

## Performance Optimization
### Response Caching Strategies
### Compression (gzip, brotli)
### Field Selection (sparse fieldsets)
### Bulk Operations and Batch Endpoints

## Monitoring & Observability
### API Metrics
- Request count, latency, error rates
- Endpoint usage analytics
### Logging Strategy
### Distributed Tracing

## Developer Experience
### API Usability Assessment
### Integration Complexity
### SDK and Client Library Needs
### Sandbox and Testing Environments
```

#### api-specification.md Structure
```markdown
# API Specification: {Topic}
*OpenAPI 3.0 Specification*

## API Information
- **Title**: {API Name}
- **Version**: 1.0.0
- **Base URL**: https://api.example.com/v1
- **Contact**: api-team@example.com

## Endpoints

### Users API

#### GET /users
**Description**: Retrieve a list of users

**Parameters**:
- `page` (query, integer): Page number (default: 1)
- `limit` (query, integer): Items per page (default: 20, max: 100)
- `sort` (query, string): Sort field (e.g., "created_at", "-updated_at")
- `filter[status]` (query, string): Filter by user status

**Response 200**:
```json
{
  "data": [
    {
      "id": "uuid",
      "username": "string",
      "email": "string",
      "created_at": "2025-10-15T00:00:00Z"
    }
  ],
  "meta": {
    "page": 1,
    "limit": 20,
    "total": 100
  },
  "links": {
    "self": "/users?page=1",
    "next": "/users?page=2",
    "prev": null
  }
}
```

#### POST /users
**Description**: Create a new user

**Request Body**:
```json
{
  "username": "string (required, 3-50 chars)",
  "email": "string (required, valid email)",
  "password": "string (required, min 8 chars)",
  "profile": {
    "first_name": "string (optional)",
    "last_name": "string (optional)"
  }
}
```

**Response 201**:
```json
{
  "data": {
    "id": "uuid",
    "username": "string",
    "email": "string",
    "created_at": "2025-10-15T00:00:00Z"
  }
}
```

**Response 400** (Validation Error):
```json
{
  "error": {
    "code": "VALIDATION_ERROR",
    "message": "Request validation failed",
    "details": [
      {
        "field": "email",
        "message": "Invalid email format"
      }
    ]
  }
}
```

[Continue for all endpoints...]

## Authentication

### OAuth 2.0 Flow
1. Client requests authorization
2. User grants permission
3. Client receives access token
4. Client uses token in requests

**Header Format**:
```
Authorization: Bearer {access_token}
```

## Rate Limiting

**Headers**:
- `X-RateLimit-Limit`: 1000
- `X-RateLimit-Remaining`: 999
- `X-RateLimit-Reset`: 1634270400

**Response 429** (Too Many Requests):
```json
{
  "error": {
    "code": "RATE_LIMIT_EXCEEDED",
    "message": "API rate limit exceeded",
    "retry_after": 3600
  }
}
```
```

## 🔄 **Session Integration**

### Status Synchronization
Upon completion, update `workflow-session.json`:
```json
{
  "phases": {
    "BRAINSTORM": {
      "api_designer": {
        "status": "completed",
        "completed_at": "timestamp",
        "output_directory": ".workflow/WFS-{topic}/.brainstorming/api-designer/",
        "key_insights": ["endpoint_design", "versioning_strategy", "data_contracts"]
      }
    }
  }
}
```

### Cross-Role Collaboration
The API designer perspective provides:
- **API Contract Specifications** → Frontend Developer
- **Data Schema Requirements** → Data Architect
- **Security Requirements** → Security Expert
- **Integration Endpoints** → System Architect
- **Performance Constraints** → DevOps Engineer

## ✅ **Quality Assurance**

### Required Analysis Elements
- [ ] Complete endpoint inventory with HTTP methods and paths
- [ ] Detailed request/response schemas with validation rules
- [ ] Clear versioning strategy and backward compatibility plan
- [ ] Comprehensive error handling and status code usage
- [ ] API documentation strategy (OpenAPI/Swagger)

### API Design Principles
- [ ] **Consistency**: Uniform naming conventions and patterns across all endpoints
- [ ] **Simplicity**: Intuitive resource modeling and URL structures
- [ ] **Flexibility**: Support for filtering, sorting, pagination, and field selection
- [ ] **Security**: Proper authentication, authorization, and input validation
- [ ] **Performance**: Caching strategies, compression, and efficient data structures

### Developer Experience Validation
- [ ] API is self-documenting with clear endpoint descriptions
- [ ] Error messages are actionable and helpful for debugging
- [ ] Response formats are consistent and predictable
- [ ] Code examples and integration guides are provided
- [ ] Sandbox environment available for testing

### Technical Completeness
- [ ] **Resource Modeling**: All domain entities mapped to API resources
- [ ] **CRUD Coverage**: Complete create, read, update, delete operations
- [ ] **Query Capabilities**: Advanced filtering, sorting, and search functionality
- [ ] **Versioning**: Clear version management and migration paths
- [ ] **Monitoring**: API metrics, logging, and tracing strategies defined

### Integration Readiness
- [ ] **Client Compatibility**: API works with all target client platforms
- [ ] **External Integration**: Third-party API dependencies identified
- [ ] **Backward Compatibility**: Changes don't break existing clients
- [ ] **Migration Path**: Clear upgrade paths for API consumers
- [ ] **SDK Support**: Client libraries and code generation considered
@@ -1,455 +1,367 @@

---
name: artifacts
description: Interactive clarification generating confirmed guidance specification through role-based analysis and synthesis
argument-hint: "[-y|--yes] topic or challenge description [--count N]"
allowed-tools: TodoWrite(*), Read(*), Write(*), Glob(*), AskUserQuestion(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-select recommended roles, skip all clarification questions, use default answers.

## Overview

Seven-phase workflow: **Context collection** → **Topic analysis** → **Role selection** → **Role questions** → **Conflict resolution** → **Final check** → **Generate specification**

All user interactions use the AskUserQuestion tool (max 4 questions per call, multi-round).

**Input**: `"GOAL: [objective] SCOPE: [boundaries] CONTEXT: [background]" [--count N]`
**Output**: `.workflow/active/WFS-{topic}/.brainstorming/guidance-specification.md`
**Core Principle**: Questions dynamically generated from project context + topic keywords, NOT generic templates

**Parameters**:
- `topic` (required): Topic or challenge description (structured format recommended)
- `--count N` (optional): Number of roles to select (system recommends N+2 options, default: 3)

---

## Quick Reference

### Phase Summary

| Phase | Goal | AskUserQuestion | Storage |
|-------|------|-----------------|---------|
| 0 | Context collection | - | context-package.json |
| 1 | Topic analysis | 2-4 questions | intent_context |
| 2 | Role selection | 1 multi-select | selected_roles |
| 3 | Role questions | 3-4 per role | role_decisions[role] |
| 4 | Conflict resolution | max 4 per round | cross_role_decisions |
| 4.5 | Final check | progressive rounds | additional_decisions |
| 5 | Generate spec | - | guidance-specification.md |

### AskUserQuestion Pattern

```javascript
// Single-select (Phase 1, 3, 4)
AskUserQuestion({
  questions: [
    {
      question: "{问题文本}",
      header: "{短标签}",  // max 12 chars
      multiSelect: false,
      options: [
        { label: "{选项}", description: "{说明和影响}" },
        { label: "{选项}", description: "{说明和影响}" },
        { label: "{选项}", description: "{说明和影响}" }
      ]
    }
    // ... max 4 questions per call
  ]
})

// Multi-select (Phase 2)
AskUserQuestion({
  questions: [{
    question: "请选择 {count} 个角色",
    header: "角色选择",
    multiSelect: true,
    options: [/* max 4 options per call */]
  }]
})
```

### Multi-Round Execution

```javascript
const BATCH_SIZE = 4;
for (let i = 0; i < allQuestions.length; i += BATCH_SIZE) {
  const batch = allQuestions.slice(i, i + BATCH_SIZE);
  AskUserQuestion({ questions: batch });
  // Store responses before the next round
}
```

---

## Task Tracking

**TodoWrite Rule**: EXTEND auto-parallel's task list (NOT replace/overwrite)

**When called from auto-parallel**:
- Find artifacts parent task → Mark "in_progress"
- APPEND sub-tasks (Phase 0-5) → Mark each as it completes
- When Phase 5 completes → Mark parent "completed"
- PRESERVE all other auto-parallel tasks

**Standalone Mode**:
```json
[
  {"content": "Initialize session", "status": "pending", "activeForm": "Initializing"},
  {"content": "Phase 0: Context collection", "status": "pending", "activeForm": "Phase 0"},
  {"content": "Phase 1: Topic analysis (2-4 questions)", "status": "pending", "activeForm": "Phase 1"},
  {"content": "Phase 2: Role selection", "status": "pending", "activeForm": "Phase 2"},
  {"content": "Phase 3: Role questions (per role)", "status": "pending", "activeForm": "Phase 3"},
  {"content": "Phase 4: Conflict resolution", "status": "pending", "activeForm": "Phase 4"},
  {"content": "Phase 4.5: Final clarification", "status": "pending", "activeForm": "Phase 4.5"},
  {"content": "Phase 5: Generate specification", "status": "pending", "activeForm": "Phase 5"}
]
```

## User Interaction Protocol

### Question Output Format

All questions are output as structured text (detailed format with descriptions):

```markdown
【问题{N} - {短标签}】{问题文本}
a) {选项标签}
   说明:{选项说明和影响}
b) {选项标签}
   说明:{选项说明和影响}
c) {选项标签}
   说明:{选项说明和影响}

请回答:{N}a 或 {N}b 或 {N}c
```

**Multi-select format** (Phase 2 role selection):
```markdown
【角色选择】请选择 {count} 个角色参与头脑风暴分析

a) {role-name} ({中文名})
   推荐理由:{基于topic的相关性说明}
b) {role-name} ({中文名})
   推荐理由:{基于topic的相关性说明}
...

支持格式:
- 分别选择:2a 2c 2d (选择第2题的a、c、d选项)
- 合并语法:2acd (选择a、c、d)
- 逗号分隔:2a,c,d

请输入选择:
```

### Input Parsing Rules

**Supported formats** (intelligent parsing):

1. **Space-separated**: `1a 2b 3c` → Q1:a, Q2:b, Q3:c
2. **Comma-separated**: `1a,2b,3c` → Q1:a, Q2:b, Q3:c
3. **Multi-select combined**: `2abc` → Q2: options a,b,c
4. **Multi-select spaces**: `2 a b c` → Q2: options a,b,c
5. **Multi-select comma**: `2a,b,c` → Q2: options a,b,c
6. **Natural language**: `问题1选a` → 1a (fallback parsing)

**Parsing algorithm**:
- Extract question numbers and option letters
- Validate question numbers match the output
- Validate option letters exist for each question
- If ambiguous/invalid, output an example format and request re-input

**Error handling** (lenient):
- Recognize common variations automatically
- If parsing fails, show an example and wait for clarification
- Support re-input without penalty
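
A compact sketch of the rules above, covering the space-separated and multi-select forms (the comma-separated question list and the natural-language fallback are omitted; helper name is hypothetical):

```javascript
// Sketch: parse answers like "1a 2b", "2acd", or "2a,c,d" into { question: [options] }
function parseAnswers(input, questionCount) {
  const result = {}
  for (const token of input.trim().split(/\s+/)) {
    const m = token.match(/^(\d+)\s*([a-z,]+)$/i)
    if (!m) return null                        // unparseable → caller re-prompts with an example
    const q = parseInt(m[1], 10)
    if (q < 1 || q > questionCount) return null
    result[q] = [...m[2].replace(/,/g, '')]    // "acd" or "a,c,d" → ['a','c','d']
  }
  return result
}

parseAnswers('1a 2bc', 2)  // → { 1: ['a'], 2: ['b','c'] }
```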
|
||||
|
||||
### Batching Strategy
|
||||
|
||||
**Batch limits**:
|
||||
- **Default**: Maximum 10 questions per round
|
||||
- **Phase 2 (role selection)**: Display all recommended roles at once (count+2 roles)
|
||||
- **Auto-split**: If questions > 10, split into multiple rounds with clear round indicators
|
||||
|
||||
**Round indicators**:
|
||||
```markdown
|
||||
===== 第 1 轮问题 (共2轮) =====
|
||||
【问题1 - ...】...
|
||||
【问题2 - ...】...
|
||||
...
|
||||
【问题10 - ...】...
|
||||
|
||||
请回答 (格式: 1a 2b ... 10c):
|
||||
```
|
||||
|
||||
### Interaction Flow
|
||||
|
||||
**Standard flow**:
|
||||
1. Output questions in formatted text
|
||||
2. Output expected input format example
|
||||
3. Wait for user input
|
||||
4. Parse input with intelligent matching
|
||||
5. If parsing succeeds → Store answers and continue
|
||||
6. If parsing fails → Show error, example, and wait for re-input
|
||||
|
||||
**No question/option limits**: Text-based interaction removes previous 4-question and 4-option restrictions
|
||||
---
|
||||
|
||||
## Execution Phases
|
||||
|
||||
### Session Management
|
||||
- Check `.workflow/.active-*` markers first
|
||||
- Multiple sessions → Prompt selection | Single → Use it | None → Create `WFS-[topic-slug]`
|
||||
- Parse `--count N` parameter from user input (default: 3 if not specified)
|
||||
- Store decisions in `workflow-session.json` including count parameter
|
||||
|
||||
### Phase 0: Automatic Project Context Collection
|
||||
- Check `.workflow/active/` for existing sessions
|
||||
- Multiple → Prompt selection | Single → Use it | None → Create `WFS-[topic-slug]`
|
||||
- Parse `--count N` parameter (default: 3)
|
||||
- Store decisions in `workflow-session.json`
|
||||
|
||||
**Goal**: Gather project architecture, documentation, and relevant code context BEFORE user interaction
|
||||
### Phase 0: Context Collection
|
||||
|
||||
**Detection Mechanism** (execute first):
|
||||
```javascript
|
||||
// Check if context-package already exists
|
||||
const contextPackagePath = `.workflow/WFS-{session-id}/.process/context-package.json`;
|
||||
**Goal**: Gather project context BEFORE user interaction
|
||||
|
||||
if (file_exists(contextPackagePath)) {
|
||||
// Validate package
|
||||
const package = Read(contextPackagePath);
|
||||
if (package.metadata.session_id === session_id) {
|
||||
console.log("✅ Valid context-package found, skipping Phase 0");
|
||||
return; // Skip to Phase 1
|
||||
}
|
||||
}
|
||||
```
|
||||
**Steps**:
|
||||
1. Check if `context-package.json` exists → Skip if valid
|
||||
2. Invoke `context-search-agent` (BRAINSTORM MODE - lightweight)
|
||||
3. Output: `.workflow/active/WFS-{session-id}/.process/context-package.json`
|
||||
|
||||
**Implementation**: Invoke `context-search-agent` only if package doesn't exist
|
||||
**Graceful Degradation**: If agent fails, continue to Phase 1 without context
```javascript
Task(
  subagent_type="context-search-agent",
  run_in_background=false,
  description="Gather project context for brainstorm",
  prompt=`
You are executing as context-search-agent (.claude/agents/context-search-agent.md).
Execute context-search-agent in BRAINSTORM MODE (Phase 1-2 only).

## Execution Mode
**BRAINSTORM MODE** (Lightweight) - Phase 1-2 only (skip deep analysis)

## Session Information
- **Session ID**: ${session_id}
- **Task Description**: ${task_description}
- **Output Path**: .workflow/active/${session_id}/.process/context-package.json

## Mission
Execute complete context-search-agent workflow for implementation planning:

### Phase 1: Initialization & Pre-Analysis
1. **Detection**: Check for existing context-package (early exit if valid)
2. **Foundation**: Initialize code-index, get project structure, load docs
3. **Analysis**: Extract keywords, determine scope, classify complexity

### Phase 2: Multi-Source Context Discovery
Execute all 3 discovery tracks:
- **Track 1**: Reference documentation (CLAUDE.md, architecture docs)
- **Track 2**: Web examples (use Exa MCP for unfamiliar tech/APIs)
- **Track 3**: Codebase analysis (5-layer discovery: files, content, patterns, deps, config/tests)

### Phase 3: Synthesis, Assessment & Packaging
1. Apply relevance scoring and build dependency graph
2. Synthesize 3-source data (docs > code > web)
3. Integrate brainstorm artifacts (if .brainstorming/ exists, read content)
4. Perform conflict detection with risk assessment
5. Generate and validate context-package.json

## Output Requirements
Complete context-package.json with:
- **metadata**: task_description, keywords, complexity, tech_stack, session_id
- **project_context**: architecture_patterns, coding_conventions, tech_stack
- **assets**: {documentation[], source_code[], config[], tests[]} with relevance scores
- **dependencies**: {internal[], external[]} with dependency graph
- **brainstorm_artifacts**: {guidance_specification, role_analyses[], synthesis_output} with content
- **conflict_detection**: {risk_level, risk_factors, affected_modules[], mitigation_strategy}

## Quality Validation
Before completion verify:
- [ ] Valid JSON format with all required fields
- [ ] File relevance accuracy >80%
- [ ] Dependency graph complete (max 2 transitive levels)
- [ ] Conflict risk level calculated correctly
- [ ] No sensitive data exposed
- [ ] Total files ≤50 (prioritize high-relevance)

Execute autonomously following agent documentation.
Report completion with statistics.
`
)
```
**Graceful Degradation**:
- If the agent fails: Log a warning and continue to Phase 1 without project context
- If the package is invalid: Re-run context-search-agent (a sketch follows below)
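A minimal sketch of how the degradation path could wrap the detection and the agent call; the object-argument form of `Task` and the error shapes here are assumptions, not a confirmed API:

```javascript
// Sketch only: validate-or-rerun, then degrade gracefully on failure.
function runPhase0(session_id, task_description) {
  const pkgPath = `.workflow/active/WFS-${session_id}/.process/context-package.json`;
  try {
    if (file_exists(pkgPath)) {
      const pkg = Read(pkgPath);
      if (pkg.metadata.session_id === session_id) return pkg; // valid → skip Phase 0
    }
    // Invalid or missing package → (re-)run the agent, then reload
    Task({ subagent_type: "context-search-agent" /* ...prompt as above... */ });
    return Read(pkgPath);
  } catch (err) {
    console.warn("Phase 0 failed, continuing to Phase 1 without context:", err);
    return null; // Phase 1 proceeds without project context
  }
}
```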
### Phase 1: Topic Analysis & Intent Classification

**Goal**: Extract keywords/challenges to drive all subsequent question generation, **enriched by Phase 0 project context**

**Steps**:
1. **Load Phase 0 context** (if available):
   - Read `.workflow/active/WFS-{session-id}/.process/context-package.json`
   - Extract: tech_stack, existing modules, conflict_risk, relevant files
2. **Deep topic analysis** (context-aware):
   - Extract technical entities from the topic + existing codebase
   - Identify core challenges considering the existing architecture
   - Consider constraints (timeline/budget/compliance)
   - Define success metrics based on the current project state
3. **Generate 2-4 context-aware probing questions**:
   - Reference the existing tech stack in questions
   - Consider integration with existing modules
   - Address identified conflict risks from Phase 0
   - Target root challenges and trade-off priorities
4. **User interaction**: Output questions using the text format (see User Interaction Protocol), then wait for user input
5. **Parse user answers**: Use intelligent parsing to extract answers from user input (supports multiple formats)
6. **Storage**: Store answers to `session.intent_context` as `{extracted_keywords, identified_challenges, user_answers, project_context_used}`
**Example Output** (text format):
```markdown
===== Phase 1: 项目意图分析 =====

【问题1 - 核心挑战】实时协作平台的主要技术挑战?
a) 实时数据同步
   说明:100+用户同时在线,状态同步复杂度高
b) 可扩展性架构
   说明:用户规模增长时的系统扩展能力
c) 冲突解决机制
   说明:多用户同时编辑的冲突处理策略

【问题2 - 优先级】MVP阶段最关注的指标?
a) 功能完整性
   说明:实现所有核心功能
b) 用户体验
   说明:流畅的交互体验和响应速度
c) 系统稳定性
   说明:高可用性和数据一致性

请回答 (格式: 1a 2b):
```

**Equivalent `AskUserQuestion` structure** (when tool-based interaction is used):
```javascript
AskUserQuestion({
  questions: [
    {
      question: "实时协作平台的主要技术挑战?",
      header: "核心挑战",
      multiSelect: false,
      options: [
        { label: "实时数据同步", description: "100+用户同时在线,状态同步复杂度高" },
        { label: "可扩展性架构", description: "用户规模增长时的系统扩展能力" },
        { label: "冲突解决机制", description: "多用户同时编辑的冲突处理策略" }
      ]
    },
    {
      question: "MVP阶段最关注的指标?",
      header: "优先级",
      multiSelect: false,
      options: [
        { label: "功能完整性", description: "实现所有核心功能" },
        { label: "用户体验", description: "流畅的交互体验和响应速度" },
        { label: "系统稳定性", description: "高可用性和数据一致性" }
      ]
    }
  ]
})
```

**User input examples**:
- `1a 2c` → Q1:a, Q2:c
- `1a,2c` → Q1:a, Q2:c
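For illustration, the shape stored to `session.intent_context` after a `1a 2c` reply might look like this; the field names follow the storage step above, while the values are invented for the running example:

```javascript
// Sketch only: example contents after Phase 1 completes.
session.intent_context = {
  extracted_keywords: ["real-time sync", "100+ users", "collaboration"],
  identified_challenges: ["state synchronization", "conflict resolution"],
  user_answers: { 1: "a", 2: "c" }, // Q1 → 实时数据同步, Q2 → 系统稳定性
  project_context_used: true        // Phase 0 context-package was loaded
};
```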
**⚠️ CRITICAL**: Questions MUST reference topic keywords. Generic "Project type?" violates dynamic generation.
### Phase 2: Role Selection

**⚠️ CRITICAL**: User MUST interact to select roles. NEVER auto-select without user confirmation.

**Goal**: User selects roles from intelligent recommendations

**Available Roles**:
- data-architect (数据架构师)
- product-manager (产品经理)
- product-owner (产品负责人)
- scrum-master (敏捷教练)
- subject-matter-expert (领域专家)
- system-architect (系统架构师)
- test-strategist (测试策略师)
- ui-designer (UI 设计师)
- ux-expert (UX 专家)

**Steps**:
1. **Intelligent role recommendation** (AI analysis):
   - Analyze Phase 1 extracted keywords and challenges
   - Use AI reasoning to determine the most relevant roles for the specific topic
   - Recommend count+2 roles (e.g., if the user wants 3 roles, recommend 5 options)
   - Provide a clear rationale for each recommended role based on topic context
2. **User selection** (text interaction):
   - Output all recommended roles at once (no batching needed for count+2 roles)
   - Display roles with labels and relevance rationale
   - Wait for user input in multi-select format
   - Parse user input (supports multiple formats)
   - **Storage**: Store selections to `session.selected_roles`
**Example Output** (text format):
```markdown
===== Phase 2: 角色选择 =====

【角色选择】请选择 3 个角色参与头脑风暴分析

a) system-architect (系统架构师)
   推荐理由:实时同步架构设计和技术选型的核心角色
b) ui-designer (UI设计师)
   推荐理由:协作界面用户体验和实时状态展示
c) product-manager (产品经理)
   推荐理由:功能优先级和MVP范围决策
d) data-architect (数据架构师)
   推荐理由:数据同步模型和存储方案设计
e) ux-expert (UX专家)
   推荐理由:多用户协作交互流程优化

支持格式:
- 分别选择:2a 2c 2d (选择a、c、d)
- 合并语法:2acd (选择a、c、d)
- 逗号分隔:2a,c,d (选择a、c、d)

请输入选择:
```

**Equivalent `AskUserQuestion` structure** (when tool-based interaction is used):
```javascript
AskUserQuestion({
  questions: [{
    question: "请选择 3 个角色参与头脑风暴分析",
    header: "角色选择",
    multiSelect: true,
    options: [
      { label: "system-architect", description: "实时同步架构设计和技术选型" },
      { label: "ui-designer", description: "协作界面用户体验和状态展示" },
      { label: "product-manager", description: "功能优先级和MVP范围决策" },
      { label: "data-architect", description: "数据同步模型和存储方案设计" }
    ]
  }]
})
```

**User input examples**:
- `2acd` → Roles: a, c, d (system-architect, product-manager, data-architect)
- `2a 2c 2d` → Same result
- `2a,c,d` → Same result
**Role Recommendation Rules**:
- NO hardcoded keyword-to-role mappings
- Use intelligent analysis of topic, challenges, and requirements
- Consider role synergies and coverage gaps
- Explain WHY each role is relevant to THIS specific topic
- Default recommendation: count+2 roles for the user to choose from
### Phase 3: Role-Specific Questions (Dynamic Generation)

**Goal**: Generate deep questions mapping role expertise to Phase 1 challenges

**Algorithm**:
```
FOR each selected role (processed sequentially, one at a time for clarity):
  1. Map Phase 1 challenges to the role domain:
     - "real-time sync" + system-architect → State management pattern
     - "100 users" + system-architect → Communication protocol
     - "low latency" + system-architect → Conflict resolution

  2. Generate 3-4 questions per role probing implementation depth, trade-offs, edge cases:
     Q: "How handle real-time state sync for 100+ users?" (explores approach)
     Q: "How resolve conflicts when 2 users edit simultaneously?" (explores edge case)
     Options: [Event Sourcing/Centralized/CRDT] (concrete, explain trade-offs for THIS use case)

  3. Output questions in text format per role:
     - Display all questions for the current role (3-4 questions, no 10-question limit)
     - Questions in Chinese (用中文提问)
     - Wait for user input
     - Parse answers using intelligent parsing
     - Store answers to session.role_decisions[role]
```
**Example** (system-architect):
```javascript
AskUserQuestion({
  questions: [
    {
      question: "100+ 用户实时状态同步方案?",
      header: "状态同步",
      multiSelect: false,
      options: [
        { label: "Event Sourcing", description: "完整事件历史,支持回溯,存储成本高" },
        { label: "集中式状态管理", description: "实现简单,单点瓶颈风险" },
        { label: "CRDT", description: "去中心化,自动合并,学习曲线陡" }
      ]
    },
    {
      question: "两个用户同时编辑冲突如何解决?",
      header: "冲突解决",
      multiSelect: false,
      options: [
        { label: "自动合并", description: "用户无感知,可能产生意外结果" },
        { label: "手动解决", description: "用户控制,增加交互复杂度" },
        { label: "版本控制", description: "保留历史,需要分支管理" }
      ]
    }
  ]
})
```

**Batching Strategy**:
- Each role outputs all its questions at once (typically 3-4 questions)
- No need to split per role (within the 10-question batch limit)
- Multiple roles processed sequentially, one role at a time for clarity (see the sketch below)
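A minimal sketch of the per-role loop; `generateRoleQuestions`, `outputQuestions`, and `awaitUserInput` are hypothetical helpers standing in for the AI generation and I/O steps:

```javascript
// Sketch only: sequential per-role questioning, answers stored per role.
for (const role of session.selected_roles) {
  const questions = generateRoleQuestions(role, session.intent_context); // 3-4 questions
  outputQuestions(role, questions);                 // text format, Chinese wording
  const answers = parseAnswers(awaitUserInput());   // intelligent parsing (see above)
  session.role_decisions[role] = answers;           // stored before the next role starts
}
```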
**Output Format**: Follow the standard format from the "User Interaction Protocol" section (single-choice question format)

**Example Topic-Specific Questions** (system-architect role for "real-time collaboration platform"):
- "100+ 用户实时状态同步方案?" → Options: Event Sourcing / 集中式状态管理 / CRDT
- "两个用户同时编辑冲突如何解决?" → Options: 自动合并 / 手动解决 / 版本控制
- "低延迟通信协议选择?" → Options: WebSocket / SSE / 轮询
- "系统扩展性架构方案?" → Options: 微服务 / 单体+缓存 / Serverless

**Quality Requirements**: See the "Question Guidelines" section for detailed rules

### Phase 4: Cross-Role Clarification (Conflict Detection)

**Goal**: Resolve ACTUAL conflicts from Phase 3 answers, not pre-defined relationships
**Algorithm**:
```
1. Analyze Phase 3 answers for conflicts:
   - Contradictory choices: product-manager "fast iteration" vs system-architect "complex Event Sourcing"
   - Missing integration: ui-designer "Optimistic updates" but system-architect didn't address conflict handling
   - Implicit dependencies: ui-designer "Live cursors" but no auth approach defined

2. FOR each detected conflict:
   Generate clarification questions referencing SPECIFIC Phase 3 choices

3. Output clarification questions in text format:
   - Batch conflicts into rounds (max 10 questions per round)
   - Display questions with context from Phase 3 answers
   - Questions in Chinese (用中文提问)
   - Wait for user input
   - Parse answers using intelligent parsing
   - Store answers to session.cross_role_decisions

4. If NO conflicts: Skip Phase 4 (inform user: "未检测到跨角色冲突,跳过Phase 4")
```
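A minimal sketch of this detection pass, assuming `session.role_decisions` holds each role's answers as question → choice pairs; `analyzeForConflicts` stands in for the AI reasoning step and is not a real API:

```javascript
// Sketch only: gather every Phase 3 choice, then let the AI analysis step
// look for contradictions, gaps, and implicit dependencies.
// A static rule table is deliberately avoided (see the requirement above).
const allChoices = Object.entries(session.role_decisions).flatMap(
  ([role, decisions]) =>
    Object.entries(decisions).map(([question, choice]) => ({ role, question, choice }))
);

const conflicts = analyzeForConflicts(allChoices); // hypothetical AI analysis step

if (conflicts.length === 0) {
  console.log("未检测到跨角色冲突,跳过Phase 4");
} else {
  // Batch into rounds of ≤10 and ask clarification questions (see Example below)
}
```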
**Example**:
```javascript
AskUserQuestion({
  questions: [{
    question: "CRDT 与 UI 回滚期望冲突,如何解决?\n背景:system-architect选择CRDT,ui-designer期望回滚UI",
    header: "架构冲突",
    multiSelect: false,
    options: [
      { label: "采用 CRDT", description: "保持去中心化,调整UI期望" },
      { label: "显示合并界面", description: "增加用户交互,展示冲突详情" },
      { label: "切换到 OT", description: "支持回滚,增加服务器复杂度" }
    ]
  }]
})
```

**Batching Strategy**:
- Maximum 10 clarification questions per round
- If conflicts > 10, split into multiple rounds
- Prioritize the most critical conflicts first
**Output Format**: Follow the standard format from the "User Interaction Protocol" section (single-choice question format with background context)

**Example Conflict Detection** (from Phase 3 answers):
- **Architecture Conflict**: "CRDT 与 UI 回滚期望冲突,如何解决?"
  - Background: system-architect chose CRDT, ui-designer expects rollback UI
  - Options: 采用 CRDT / 显示合并界面 / 切换到 OT
- **Integration Gap**: "实时光标功能缺少身份认证方案"
  - Background: ui-designer chose live cursors, no auth defined
  - Options: OAuth 2.0 / JWT Token / Session-based

**Quality Requirements**: See the "Question Guidelines" section for conflict-specific rules

### Phase 4.5: Final Clarification

**Purpose**: Ensure no important points are missed before generating the specification

**Steps**:
1. Ask an initial check:
   ```javascript
   AskUserQuestion({
     questions: [{
       question: "在生成最终规范之前,是否有前面未澄清的重点需要补充?",
       header: "补充确认",
       multiSelect: false,
       options: [
         { label: "无需补充", description: "前面的讨论已经足够完整" },
         { label: "需要补充", description: "还有重要内容需要澄清" }
       ]
     }]
   })
   ```
2. If "需要补充":
   - Analyze the user's additional points
   - Generate progressive questions (not role-bound, interconnected)
   - AskUserQuestion (max 4 per round) → Store to `session.additional_decisions`
   - Repeat until the user confirms completion
3. If "无需补充": Proceed to Phase 5

**Progressive Pattern**: Questions are interconnected; each round informs the next; continue until resolved.

### Phase 5: Generate Guidance Specification

**File**: `.workflow/active/WFS-{topic}/.brainstorming/guidance-specification.md`

**Steps**:
1. Load all decisions: `intent_context` + `selected_roles` + `role_decisions` + `cross_role_decisions` + `additional_decisions`
2. Transform Q&A pairs to declarative statements: Questions → Headers, Answers → CONFIRMED/SELECTED statements
3. Generate guidance-specification.md (the PRIMARY OUTPUT FILE; template in the "Output & Governance" section below)
4. Update workflow-session.json with METADATA ONLY:
   - session_id (e.g., "WFS-topic-slug")
   - selected_roles[] (array of role names, e.g., ["system-architect", "ui-designer", "product-manager"])
   - topic (original user input string)
   - timestamp (ISO-8601 format)
   - phase_completed: "artifacts"
   - count_parameter (number from the --count flag)
5. Validate: no interrogative sentences in the .md file, all decisions traceable, no content duplication in the .json

**⚠️ CRITICAL OUTPUT SEPARATION**:
- **guidance-specification.md**: Full guidance content (decisions, rationale, integration points)
- **workflow-session.json**: Session metadata ONLY (no guidance content, no decisions, no Q&A pairs)
- **NO content duplication**: Guidance stays in .md, metadata stays in .json
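As a sketch of step 2, assuming each stored decision keeps the option's label and description (the field names here are illustrative, not a fixed schema):

```javascript
// Sketch only: Question → Header, Answer → CONFIRMED statement.
// The exact markdown layout comes from the Output Template below.
function toDeclarative(decision) {
  return [
    `### ${decision.header}`,                  // question header becomes a section header
    `**CONFIRMED**: ${decision.answer.label}`, // interrogative → declarative
    `- Rationale: ${decision.answer.description}`,
    `- Source: ${decision.question} (Phase ${decision.phase})` // traceability to the user answer
  ].join("\n");
}
```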
---
## Question Guidelines

### Core Principle

**Target Audience**: Developers (who understand the technology but must start from user needs)

**Question Structure**: `[业务场景/需求前提] + [技术关注点]` (business scenario/requirement premise + technical concern)
**Option Structure**: `标签:[技术方案] + 说明:[业务影响] + [技术权衡]` (label: technical approach; description: business impact + technical trade-offs)

### Quality Rules

**MUST Include**:
- ✅ All questions in Chinese (用中文提问)
- ✅ Business scenario as the question premise
- ✅ Business-impact explanations for technical options
- ✅ Quantified metrics and constraints (e.g., concurrency, latency, availability)

**MUST Avoid**:
- ❌ Pure technology selection without business context
- ❌ Overly abstract user-experience questions
- ❌ Generic architecture questions detached from the topic

### Phase-Specific Requirements

| Phase | Focus | Key Requirements |
|-------|-------|------------------|
| 1 | Intent understanding | Reference topic keywords; cover usage scenarios, business constraints, and priorities (MVP vs long-term) |
| 2 | Role recommendation | Intelligent analysis (NOT keyword mapping); explain relevance to THIS topic |
| 3 | Role questions | Reference Phase 1 keywords; concrete options with trade-offs for this use case |
| 4 | Conflict resolution | Reference SPECIFIC Phase 3 choices; explain the impact on both roles; NEVER use a static cross-role matrix |

---

## Output & Governance

### Output Template

**File**: `.workflow/active/WFS-{topic}/.brainstorming/guidance-specification.md`
```markdown
# [Project] - Confirmed Guidance Specification

...

## Next Steps
**⚠️ Automatic Continuation** (when called from auto-parallel):
- auto-parallel assigns agents for role-specific analysis
- Each selected role gets a conceptual-planning-agent
- Agents read this guidance-specification.md for context

## Appendix: Decision Tracking
| Decision ID | Category | Question | Selected | Phase | Rationale |
|-------------|----------|----------|----------|-------|-----------|
| D-003+ | [Role] | [Q] | [A] | 3 | [Why] |
```
### File Structure

```
.workflow/active/WFS-[topic]/
├── workflow-session.json            # Session metadata ONLY
├── .process/
│   └── context-package.json         # Phase 0 output
└── .brainstorming/
    └── guidance-specification.md    # Full guidance content
```

### Session Metadata

**workflow-session.json** (metadata only):
```json
{
  "session_id": "WFS-{topic-slug}",
  ...
}
```

**⚠️ Rule**: Session JSON stores ONLY metadata (session_id, selected_roles[], topic, timestamps). All guidance content goes to guidance-specification.md.

### Validation Checklist

The generated guidance-specification.md MUST satisfy:
- ✅ No interrogative sentences (use CONFIRMED/SELECTED)
- ✅ Every decision traceable to a user answer
- ✅ Cross-role conflicts resolved or documented
- ✅ Next steps concrete and specific
- ✅ No content duplication between .json and .md

### Update Mechanism

```
IF guidance-specification.md EXISTS:
  Prompt: "Regenerate completely / Update sections / Cancel"
ELSE:
  Run full Phase 0-5 flow
```

### Governance Rules

- All decisions MUST use CONFIRMED/SELECTED (NO "?" in decision sections)
- Every decision MUST trace to a user answer
- Conflicts MUST be resolved (not marked "TBD")
- Next steps MUST be actionable
- Topic preserved as the authoritative reference

**CRITICAL**: The guidance is the single source of truth for downstream phases. Ambiguity violates governance.
---
name: auto-parallel
description: Parallel brainstorming automation with dynamic role selection and concurrent execution across multiple perspectives
argument-hint: "[-y|--yes] topic or challenge description [--count N]"
allowed-tools: SlashCommand(*), Task(*), TodoWrite(*), Read(*), Write(*), Bash(*), Glob(*)
---

# Workflow Brainstorm Parallel Auto Command

## Auto Mode

When `--yes` or `-y` is passed: auto-select the recommended roles, skip all clarification questions, and use default answers.
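A minimal sketch of splitting these flags out of the raw argument string; the real command's parsing rules are not specified beyond the argument-hint, so treat this as illustrative:

```javascript
// Sketch only: extract -y/--yes, --count N, and the remaining topic text.
function parseArgs(raw) {
  const tokens = raw.trim().split(/\s+/);
  const autoYes = tokens.some(t => t === "-y" || t === "--yes");
  const countIdx = tokens.indexOf("--count");
  const count = countIdx >= 0 ? Number(tokens[countIdx + 1]) : 3; // default: 3
  const topic = tokens
    .filter((t, i) =>
      t !== "-y" && t !== "--yes" && t !== "--count" &&
      !(countIdx >= 0 && i === countIdx + 1)) // drop the count value token
    .join(" ");
  return { topic, count, autoYes };
}
```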
## Coordinator Role

**This command is a pure orchestrator**: It executes 3 phases in sequence (interactive framework → parallel role analysis → synthesis), coordinating specialized commands/agents through the task attachment model.

**Task Attachment Model**:
- SlashCommand execution **expands the workflow** by attaching sub-tasks to the current TodoWrite
- Task agent execution **attaches analysis tasks** to the orchestrator's TodoWrite
- Phase 1: artifacts command attaches its internal tasks (Phase 1-5)
- Phase 2: N conceptual-planning-agent tasks attached in parallel
- Phase 3: synthesis command attaches its internal tasks
This workflow runs **fully autonomously** once triggered. Phase 1 (artifacts) handles user interaction; Phase 2 (role agents) runs in parallel.

1. **User triggers**: `/workflow:brainstorm:auto-parallel "topic" [--count N]`
2. **Execute Phase 1** → artifacts command (tasks ATTACHED) → Auto-continues
3. **Execute Phase 2** → Parallel role agents (N tasks ATTACHED concurrently) → Auto-continues
4. **Execute Phase 3** → Synthesis command (tasks ATTACHED) → Reports the final summary
**Auto-Continue Mechanism**:
- TodoList tracks current phase status and dynamically manages task attachment/collapse
...

## Core Rules

1. **Start Immediately**: The first action is TodoWrite initialization; the second action is executing the Phase 1 command
2. **No Preliminary Analysis**: Do not analyze the topic before Phase 1 - artifacts handles all analysis
3. **Parse Every Output**: Extract selected_roles from workflow-session.json after Phase 1
4. **Auto-Continue via TodoList**: Check TodoList status to execute the next pending phase automatically
5. **Track Progress**: Update TodoWrite dynamically with the task attachment/collapse pattern
6. **Task Attachment Model**: SlashCommand and Task executions **attach** sub-tasks to the current workflow. The orchestrator **executes** these attached tasks itself, then **collapses** them after completion
7. **⚠️ CRITICAL: DO NOT STOP**: This is a continuous multi-phase workflow. After executing all attached tasks, immediately collapse them and execute the next phase
8. **Parallel Execution**: Phase 2 attaches multiple agent tasks simultaneously for concurrent execution
### Phase 1: Interactive Framework Generation

**Step 1: Execute** - Interactive framework generation via the artifacts command

```javascript
SlashCommand(command="/workflow:brainstorm:artifacts \"{topic}\" --count {N}")
```
**What It Does**:
- Topic analysis: Extract challenges, generate task-specific questions
...

**Validation**:
- guidance-specification.md created with confirmed decisions
- workflow-session.json contains selected_roles[] (metadata only, no content duplication)
- Session directory `.workflow/active/WFS-{topic}/.brainstorming/` exists
**TodoWrite Update (Phase 1 SlashCommand executed - tasks attached)**:
```json
[
  {"content": "Phase 0: Parameter Parsing", "status": "completed", "activeForm": "Parsing count parameter"},
  {"content": "Phase 1: Interactive Framework Generation", "status": "in_progress", "activeForm": "Executing artifacts interactive framework"},
  {"content": "  → Topic analysis and question generation", "status": "in_progress", "activeForm": "Analyzing topic"},
  {"content": "  → Role selection and user confirmation", "status": "pending", "activeForm": "Selecting roles"},
  {"content": "  → Role questions and user decisions", "status": "pending", "activeForm": "Collecting role questions"},
  {"content": "  → Conflict detection and resolution", "status": "pending", "activeForm": "Resolving conflicts"},
  {"content": "  → Guidance specification generation", "status": "pending", "activeForm": "Generating guidance"},
  {"content": "Phase 2: Parallel Role Analysis", "status": "pending", "activeForm": "Executing parallel role analysis"},
  {"content": "Phase 3: Synthesis Integration", "status": "pending", "activeForm": "Executing synthesis integration"}
]
```

**Note**: The SlashCommand execution **attaches** artifacts' 5 internal tasks. The orchestrator **executes** these tasks sequentially.

**Next Action**: Tasks attached → **Execute Phase 1.1-1.5** sequentially

**TodoWrite Update (Phase 1 completed - tasks collapsed)**:
```json
[
  {"content": "Phase 0: Parameter Parsing", "status": "completed", "activeForm": "Parsing count parameter"},
  {"content": "Phase 1: Interactive Framework Generation", "status": "completed", "activeForm": "Executing artifacts interactive framework"},
  {"content": "Phase 2: Parallel Role Analysis", "status": "pending", "activeForm": "Executing parallel role analysis"},
  {"content": "Phase 3: Synthesis Integration", "status": "pending", "activeForm": "Executing synthesis integration"}
]
```
### Phase 2: Parallel Role Analysis Execution

**For Each Selected Role** (unified role-analysis command):
```bash
SlashCommand(command="/workflow:brainstorm:role-analysis {role-name} --session {session-id} --skip-questions")
```
**What It Does**:
- Unified command execution for each role
- Loads the topic framework from guidance-specification.md
- Applies the role-specific template and context
- Generates analysis.md addressing framework discussion points
- Supports optional interactive context gathering (via the --include-questions flag)

**Parallel Execution** (see the sketch below):
- Launch N SlashCommand calls simultaneously (one message with multiple SlashCommand invocations)
- Each role command is **attached** to the orchestrator's TodoWrite
- All roles execute concurrently, each reading the same guidance-specification.md
- Each role operates independently
- For ui-designer only: append `--style-skill {style_skill_package}` if provided
**Input**:
- `selected_roles[]` from Phase 1
- `session_id` from Phase 1
- `guidance-specification.md` (framework reference)
- `style_skill_package` (for ui-designer only)

**Validation**:
- Each role creates `.workflow/active/WFS-{topic}/.brainstorming/{role}/analysis.md`
- Optionally with `analysis-{slug}.md` sub-documents (max 5)
- **File pattern**: `analysis*.md` for globbing
- **FORBIDDEN**: `recommendations.md` or any non-`analysis` prefixed files
- All N role analyses completed
**TodoWrite Update (Phase 2 commands executed - tasks attached in parallel)**:
```json
[
  {"content": "Phase 0: Parameter Parsing", "status": "completed", "activeForm": "Parsing count parameter"},
  {"content": "Phase 1: Interactive Framework Generation", "status": "completed", "activeForm": "Executing artifacts interactive framework"},
  {"content": "Phase 2: Parallel Role Analysis", "status": "in_progress", "activeForm": "Executing parallel role analysis"},
  {"content": "  → Execute system-architect analysis", "status": "in_progress", "activeForm": "Executing system-architect analysis"},
  {"content": "  → Execute ui-designer analysis", "status": "in_progress", "activeForm": "Executing ui-designer analysis"},
  {"content": "  → Execute product-manager analysis", "status": "in_progress", "activeForm": "Executing product-manager analysis"},
  {"content": "Phase 3: Synthesis Integration", "status": "pending", "activeForm": "Executing synthesis integration"}
]
```

**Note**: Multiple command executions **attach** N role analysis tasks simultaneously. The orchestrator **executes** these tasks in parallel.

**Next Action**: Tasks attached → **Execute Phase 2.1-2.N** concurrently

**TodoWrite Update (Phase 2 completed - tasks collapsed)**:
```json
[
  {"content": "Phase 0: Parameter Parsing", "status": "completed", "activeForm": "Parsing count parameter"},
  {"content": "Phase 1: Interactive Framework Generation", "status": "completed", "activeForm": "Executing artifacts interactive framework"},
  {"content": "Phase 2: Parallel Role Analysis", "status": "completed", "activeForm": "Executing parallel role analysis"},
  {"content": "Phase 3: Synthesis Integration", "status": "pending", "activeForm": "Executing synthesis integration"}
]
```
### Phase 3: Synthesis Generation

**Step 3: Execute** - Synthesis integration via the synthesis command

```javascript
SlashCommand(command="/workflow:brainstorm:synthesis --session {sessionId}")
```
**What It Does**:
- Load original user intent from workflow-session.json
...

**Input**: `sessionId` from Phase 1

**Validation**:
- `.workflow/active/WFS-{topic}/.brainstorming/synthesis-specification.md` exists
- Synthesis references all role analyses
**TodoWrite Update (Phase 3 SlashCommand executed - tasks attached)**:
```json
[
  {"content": "Phase 0: Parameter Parsing", "status": "completed", "activeForm": "Parsing count parameter"},
  {"content": "Phase 1: Interactive Framework Generation", "status": "completed", "activeForm": "Executing artifacts interactive framework"},
  {"content": "Phase 2: Parallel Role Analysis", "status": "completed", "activeForm": "Executing parallel role analysis"},
  {"content": "Phase 3: Synthesis Integration", "status": "in_progress", "activeForm": "Executing synthesis integration"},
  {"content": "  → Load role analysis files", "status": "in_progress", "activeForm": "Loading role analyses"},
  {"content": "  → Integrate insights across roles", "status": "pending", "activeForm": "Integrating insights"},
  {"content": "  → Generate synthesis specification", "status": "pending", "activeForm": "Generating synthesis"}
]
```

**Note**: The SlashCommand execution **attaches** synthesis' internal tasks. The orchestrator **executes** these tasks sequentially.

**Next Action**: Tasks attached → **Execute Phase 3.1-3.3** sequentially

**TodoWrite Update (Phase 3 completed - tasks collapsed)**:
```json
[
  {"content": "Phase 0: Parameter Parsing", "status": "completed", "activeForm": "Parsing count parameter"},
  {"content": "Phase 1: Interactive Framework Generation", "status": "completed", "activeForm": "Executing artifacts interactive framework"},
  {"content": "Phase 2: Parallel Role Analysis", "status": "completed", "activeForm": "Executing parallel role analysis"},
  {"content": "Phase 3: Synthesis Integration", "status": "completed", "activeForm": "Executing synthesis integration"}
]
```
```
Brainstorming complete for session: {sessionId}
Roles analyzed: {count}
Synthesis: .workflow/active/WFS-{topic}/.brainstorming/synthesis-specification.md

✅ Next Steps:
1. /workflow:concept-clarify --session {sessionId}   # Optional refinement
...
```
### Key Principles

1. **Task Attachment** (when SlashCommand/Task is executed):
   - The sub-command's or agent's internal tasks are **attached** to the orchestrator's TodoWrite
   - Phase 1: `/workflow:brainstorm:artifacts` attaches 5 internal tasks (Phase 1.1-1.5)
   - Phase 2: Multiple role-analysis calls attach N role analysis tasks simultaneously
...
   - No user intervention required between phases
   - TodoWrite dynamically reflects the current execution state

**Lifecycle Summary**: Initial pending tasks → Phase 1 executed (artifacts tasks ATTACHED) → Artifacts sub-tasks executed → Phase 1 completed (tasks COLLAPSED) → Phase 2 executed (N role tasks ATTACHED in parallel) → Role analyses executed concurrently → Phase 2 completed (tasks COLLAPSED) → Phase 3 executed (synthesis tasks ATTACHED) → Synthesis sub-tasks executed → Phase 3 completed (tasks COLLAPSED) → Workflow complete.
### Brainstorming Workflow Specific Features

...
- **Dynamic Role Count**: The `--count N` parameter determines the number of Phase 2 parallel tasks (default: 3, max: 9)
- **Mixed Execution**: Sequential (Phases 1 and 3) and parallel (Phase 2) task execution

**Benefits**:
- Real-time visibility into attached tasks during execution
- Clean orchestrator-level summary after tasks complete
- Clear mental model: SlashCommand/Task = attach tasks, not delegate work
- Parallel execution support for concurrent role analysis
- Dynamic attachment/collapse maintains clarity

**Note**: See the individual Phase descriptions (Phases 1, 2, 3) for detailed TodoWrite Update examples with full JSON structures.
## Input Processing

...

## Session Management

**⚡ FIRST ACTION**: Check `.workflow/active/` for existing sessions before Phase 1

**Multiple Sessions Support**:
- Different Claude instances can have different brainstorming sessions
- If multiple sessions are found, prompt the user to select one
- If a single session is found, use it
- If no session exists, create `WFS-[topic-slug]`

**Session Continuity**:
- MUST use the selected session for all phases
- Each role's context is stored in the session directory
- Session isolation: Each session maintains independent state
## Output Structure

**Phase 1 Output**:
- `.workflow/active/WFS-{topic}/.brainstorming/guidance-specification.md` (framework content)
- `.workflow/active/WFS-{topic}/workflow-session.json` (metadata: selected_roles[], topic, timestamps, style_skill_package)

**Phase 2 Output**:
- `.workflow/active/WFS-{topic}/.brainstorming/{role}/analysis.md` (one per role)
- `.superdesign/design_iterations/` (ui-designer artifacts, if --style-skill provided)

**Phase 3 Output**:
- `.workflow/active/WFS-{topic}/.brainstorming/synthesis-specification.md` (integrated analysis)

**⚠️ Storage Separation**: Guidance content lives in .md files, metadata in .json (no duplication)
**⚠️ Style References**: When --style-skill is provided, workflow-session.json stores the style_skill_package name, and ui-designer loads from `.claude/skills/style-{package-name}/`
...
- **Agent execution failure**: Agent-specific retry with minimal dependencies
- **Template loading issues**: The agent handles graceful degradation
- **Synthesis conflicts**: Synthesis highlights disagreements without resolution
- **Context overflow protection**: See below for automatic context management

## Context Overflow Protection

**Per-role limits**: See `conceptual-planning-agent.md` (< 3000 words main, < 2000 words sub-docs, max 5 sub-docs)

**Synthesis protection**: If total analysis output exceeds 100KB, synthesis reads only the `analysis.md` files (not sub-documents); a sketch follows below

**Recovery**: Check logs → reduce scope (--count 2) → use --summary-only → manual synthesis

**Prevention**: Start with --count 3, use a structured topic format, review output sizes before synthesis
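A minimal sketch of the >100KB guard; `Glob`/`Read` follow the tool names used in this document, and counting bytes via string length is an approximation:

```javascript
// Sketch only: fall back to main analysis files when output is too large.
const files = Glob(`.workflow/active/WFS-${topic}/.brainstorming/*/analysis*.md`);
const totalBytes = files.reduce((sum, f) => sum + Read(f).length, 0);

const toSynthesize =
  totalBytes > 100 * 1024
    ? files.filter(f => f.endsWith("/analysis.md")) // main documents only
    : files;                                        // sub-documents included
```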
## Reference Information

**File Structure**:
```
.workflow/active/WFS-[topic]/
├── workflow-session.json              # Session metadata ONLY
└── .brainstorming/
    ├── guidance-specification.md      # Framework (Phase 1)
    ├── {role}/
    │   ├── analysis.md                # Main document (with optional @references)
    │   └── analysis-{slug}.md         # Section documents (max 5)
    └── synthesis-specification.md     # Integration (Phase 3)
```

**Template Source**: `~/.claude/workflows/cli-templates/planning-roles/`
---
name: data-architect
description: Generate or update data-architect/analysis.md addressing guidance-specification discussion points from the data architecture perspective
argument-hint: "optional topic - uses existing framework if available"
allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*)
---

## 📊 **Data Architect Analysis Generator**
### Purpose
**Specialized command for generating data-architect/analysis.md** that addresses guidance-specification.md discussion points from the data architecture perspective. Creates or updates the role-specific analysis with framework references.

### Core Function
- **Framework-based Analysis**: Address each discussion point in guidance-specification.md
- **Data Architecture Focus**: Data models, pipelines, governance, and analytics perspective
- **Update Mechanism**: Create new or update existing analysis.md
- **Agent Delegation**: Use conceptual-planning-agent for analysis generation

### Analysis Scope
- **Data Model Design**: Efficient and scalable data models and schemas
- **Data Flow Design**: Data collection, processing, and storage workflows
- **Data Quality Management**: Data accuracy, completeness, and consistency
- **Analytics and Insights**: Data analysis and business intelligence solutions

### Role Boundaries & Responsibilities

#### **What This Role OWNS (Canonical Data Model - Source of Truth)**
- **Canonical Data Model**: The authoritative, system-wide data schema representing domain entities and relationships
- **Entity-Relationship Design**: Defining entities, attributes, relationships, and constraints
- **Data Normalization & Optimization**: Ensuring data integrity, reducing redundancy, and optimizing storage
- **Database Schema Design**: Physical database structures, indexes, partitioning strategies
- **Data Pipeline Architecture**: ETL/ELT processes, data warehousing, and analytics pipelines
- **Data Governance**: Data quality standards, retention policies, and compliance requirements

#### **What This Role DOES NOT Own (Defers to Other Roles)**
- **API Data Contracts**: Public-facing request/response schemas exposed by APIs → Defers to **API Designer**
- **System Integration Patterns**: How services communicate at the macro level → Defers to **System Architect**
- **UI Data Presentation**: How data is displayed to users → Defers to **UI Designer**

#### **Handoff Points**
- **TO API Designer**: Provides the canonical data model that API Designer translates into public API data contracts (as a projection/view)
- **TO System Architect**: Provides data flow requirements and storage constraints to inform system design
- **FROM System Architect**: Receives system-level integration requirements and scalability constraints
|
||||
|
||||
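To make the first handoff concrete, here is a minimal sketch of a canonical entity and its API projection. All entity and field names are hypothetical:

```javascript
// Canonical entity as the data architect defines it (internal source of truth).
const canonicalUser = {
  id: 'u-42',
  email: 'ada@example.com',
  passwordHash: 'internal-only',   // never exposed publicly
  createdAt: '2025-01-01T00:00:00Z',
  deletedAt: null,                 // soft-delete bookkeeping
};

// The API Designer exposes a projection: a reduced, renamed view of the
// canonical model. Internal fields never leak into the public contract.
function toPublicUser(user) {
  return {
    id: user.id,
    email: user.email,
    created_at: user.createdAt,    // public contract uses snake_case
  };
}

console.log(toPublicUser(canonicalUser));
```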
## ⚙️ **Execution Protocol**

### Phase 1: Session & Framework Detection
```bash
# Check active session and framework
CHECK: .workflow/.active-* marker files
IF active_session EXISTS:
    session_id = get_active_session()
    brainstorm_dir = .workflow/WFS-{session}/.brainstorming/

    CHECK: brainstorm_dir/guidance-specification.md
    IF EXISTS:
        framework_mode = true
        load_framework = true
    ELSE:
        IF topic_provided:
            framework_mode = false  # Create analysis without framework
        ELSE:
            ERROR: "No framework found and no topic provided"
```

### Phase 2: Analysis Mode Detection
```bash
# Determine execution mode
IF framework_mode == true:
    mode = "framework_based_analysis"
    topic_ref = load_framework_topic()
    discussion_points = extract_framework_points()
ELSE:
    mode = "standalone_analysis"
    topic_ref = provided_topic
    discussion_points = generate_basic_structure()
```

### Phase 3: Agent Execution with Flow Control
**Framework-Based Analysis Generation**

```bash
Task(conceptual-planning-agent): "
[FLOW_CONTROL]

Execute data-architect analysis for existing topic framework

## Context Loading
ASSIGNED_ROLE: data-architect
OUTPUT_LOCATION: .workflow/WFS-{session}/.brainstorming/data-architect/
ANALYSIS_MODE: {framework_mode ? \"framework_based\" : \"standalone\"}

## Flow Control Steps
1. **load_topic_framework**
   - Action: Load structured topic discussion framework
   - Command: Read(.workflow/WFS-{session}/.brainstorming/guidance-specification.md)
   - Output: topic_framework_content

2. **load_role_template**
   - Action: Load data-architect planning template
   - Command: bash(cat ~/.claude/workflows/cli-templates/planning-roles/data-architect.md)
   - Output: role_template_guidelines

3. **load_session_metadata**
   - Action: Load session metadata and existing context
   - Command: Read(.workflow/WFS-{session}/workflow-session.json)
   - Output: session_context

## Analysis Requirements
**Framework Reference**: Address all discussion points in guidance-specification.md from the data architecture perspective
**Role Focus**: Data models, pipelines, governance, analytics platforms
**Structured Approach**: Create analysis.md addressing framework discussion points
**Template Integration**: Apply role template guidelines within the framework structure

## Expected Deliverables
1. **analysis.md**: Comprehensive data architecture analysis addressing all framework discussion points
2. **Framework Reference**: Include @../guidance-specification.md reference in the analysis

## Completion Criteria
- Address each discussion point from guidance-specification.md with data architecture expertise
- Provide data model designs, pipeline architectures, and governance strategies
- Include scalability, performance, and quality considerations
- Reference the framework document using @ notation for integration
"
```

## 📋 **TodoWrite Integration**

### Workflow Progress Tracking
```javascript
TodoWrite({
  todos: [
    {
      content: "Detect active session and locate topic framework",
      status: "in_progress",
      activeForm: "Detecting session and framework"
    },
    {
      content: "Load guidance-specification.md and session metadata for context",
      status: "pending",
      activeForm: "Loading framework and session context"
    },
    {
      content: "Execute data-architect analysis using conceptual-planning-agent with FLOW_CONTROL",
      status: "pending",
      activeForm: "Executing data-architect framework analysis"
    },
    {
      content: "Generate analysis.md addressing all framework discussion points",
      status: "pending",
      activeForm: "Generating structured data-architect analysis"
    },
    {
      content: "Update workflow-session.json with data-architect completion status",
      status: "pending",
      activeForm: "Updating session metadata"
    }
  ]
});
```

## 📊 **Output Structure**

### Framework-Based Analysis
```
.workflow/WFS-{session}/.brainstorming/data-architect/
└── analysis.md    # Structured analysis addressing guidance-specification.md discussion points
```

### Analysis Document Structure
```markdown
# Data Architect Analysis: [Topic from Framework]

## Framework Reference
**Topic Framework**: @../guidance-specification.md
**Role Focus**: Data Architecture perspective

## Discussion Points Analysis
[Address each point from guidance-specification.md with data architecture expertise]

### Core Requirements (from framework)
[Data architecture perspective on requirements]

### Technical Considerations (from framework)
[Data model, pipeline, and storage considerations]

### User Experience Factors (from framework)
[Data access patterns and analytics user experience]

### Implementation Challenges (from framework)
[Data migration, quality, and governance challenges]

### Success Metrics (from framework)
[Data quality metrics and analytics success criteria]

## Data Architecture Specific Recommendations
[Role-specific data architecture recommendations and solutions]

---
*Generated by data-architect analysis addressing structured framework*
```

## 🔄 **Session Integration**

### Completion Status Update
```json
{
  "data_architect": {
    "status": "completed",
    "framework_addressed": true,
    "output_location": ".workflow/WFS-{session}/.brainstorming/data-architect/analysis.md",
    "framework_reference": "@../guidance-specification.md"
  }
}
```

### Integration Points
- **Framework Reference**: @../guidance-specification.md for structured discussion points
- **Cross-Role Synthesis**: Data architecture insights available for synthesis-report.md integration
- **Agent Autonomy**: Independent execution with framework guidance
---
name: product-manager
description: Generate or update product-manager/analysis.md addressing guidance-specification discussion points from the product management perspective
argument-hint: "optional topic - uses existing framework if available"
allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*)
---

## 🎯 **Product Manager Analysis Generator**

### Purpose
**Specialized command for generating product-manager/analysis.md** that addresses guidance-specification.md discussion points from the product strategy perspective. Creates or updates the role-specific analysis with framework references.

### Core Function
- **Framework-based Analysis**: Address each discussion point in guidance-specification.md
- **Product Strategy Focus**: User needs, business value, and market positioning
- **Update Mechanism**: Create a new analysis.md or update an existing one
- **Agent Delegation**: Use conceptual-planning-agent for analysis generation

### Analysis Scope
- **User Needs Analysis**: Target users, problems, and value propositions
- **Business Impact Assessment**: ROI, metrics, and commercial outcomes (see the sketch below)
- **Market Positioning**: Competitive analysis and differentiation
- **Product Strategy**: Roadmaps, priorities, and go-to-market approaches
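As a back-of-the-envelope illustration of the ROI framing, a minimal sketch; the formula is the standard net-benefit ratio, and all figures are hypothetical:

```javascript
// Simple ROI: (benefit - cost) / cost, expressed as a percentage.
function roi(benefit, cost) {
  if (cost <= 0) throw new Error('cost must be positive');
  return ((benefit - cost) / cost) * 100;
}

// Hypothetical feature: $120k projected annual benefit, $80k build cost.
console.log(roi(120_000, 80_000).toFixed(1) + '%'); // "50.0%"
```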
## ⚙️ **Execution Protocol**

### Phase 1: Session & Framework Detection
```bash
# Check active session and framework
CHECK: .workflow/.active-* marker files
IF active_session EXISTS:
    session_id = get_active_session()
    brainstorm_dir = .workflow/WFS-{session}/.brainstorming/

    CHECK: brainstorm_dir/guidance-specification.md
    IF EXISTS:
        framework_mode = true
        load_framework = true
    ELSE:
        IF topic_provided:
            framework_mode = false  # Create analysis without framework
        ELSE:
            ERROR: "No framework found and no topic provided"
```

### Phase 2: Analysis Mode Detection
```bash
# Determine execution mode
IF framework_mode == true:
    mode = "framework_based_analysis"
    topic_ref = load_framework_topic()
    discussion_points = extract_framework_points()
ELSE:
    mode = "standalone_analysis"
    topic_ref = provided_topic
    discussion_points = generate_basic_structure()
```

### Phase 3: Agent Execution with Flow Control
**Framework-Based Analysis Generation**

```bash
Task(conceptual-planning-agent): "
[FLOW_CONTROL]

Execute product-manager analysis for existing topic framework

## Context Loading
ASSIGNED_ROLE: product-manager
OUTPUT_LOCATION: .workflow/WFS-{session}/.brainstorming/product-manager/
ANALYSIS_MODE: {framework_mode ? \"framework_based\" : \"standalone\"}

## Flow Control Steps
1. **load_topic_framework**
   - Action: Load structured topic discussion framework
   - Command: Read(.workflow/WFS-{session}/.brainstorming/guidance-specification.md)
   - Output: topic_framework_content

2. **load_role_template**
   - Action: Load product-manager planning template
   - Command: bash(cat ~/.claude/workflows/cli-templates/planning-roles/product-manager.md)
   - Output: role_template_guidelines

3. **load_session_metadata**
   - Action: Load session metadata and existing context
   - Command: Read(.workflow/WFS-{session}/workflow-session.json)
   - Output: session_context

## Analysis Requirements
**Framework Reference**: Address all discussion points in guidance-specification.md from the product strategy perspective
**Role Focus**: User value, business impact, market positioning, product strategy
**Structured Approach**: Create analysis.md addressing framework discussion points
**Template Integration**: Apply role template guidelines within the framework structure

## Expected Deliverables
1. **analysis.md**: Comprehensive product strategy analysis addressing all framework discussion points
2. **Framework Reference**: Include @../guidance-specification.md reference in the analysis

## Completion Criteria
- Address each discussion point from guidance-specification.md with product management expertise
- Provide actionable business strategies and user value propositions
- Include market analysis and competitive positioning insights
- Reference the framework document using @ notation for integration
"
```

## 📋 **TodoWrite Integration**

### Workflow Progress Tracking
```javascript
TodoWrite({
  todos: [
    {
      content: "Detect active session and locate topic framework",
      status: "in_progress",
      activeForm: "Detecting session and framework"
    },
    {
      content: "Load guidance-specification.md and session metadata for context",
      status: "pending",
      activeForm: "Loading framework and session context"
    },
    {
      content: "Execute product-manager analysis using conceptual-planning-agent with FLOW_CONTROL",
      status: "pending",
      activeForm: "Executing product-manager framework analysis"
    },
    {
      content: "Generate analysis.md addressing all framework discussion points",
      status: "pending",
      activeForm: "Generating structured product-manager analysis"
    },
    {
      content: "Update workflow-session.json with product-manager completion status",
      status: "pending",
      activeForm: "Updating session metadata"
    }
  ]
});
```

## 📊 **Output Structure**

### Framework-Based Analysis
```
.workflow/WFS-{session}/.brainstorming/product-manager/
└── analysis.md    # Structured analysis addressing guidance-specification.md discussion points
```

### Analysis Document Structure
```markdown
# Product Manager Analysis: [Topic from Framework]

## Framework Reference
**Topic Framework**: @../guidance-specification.md
**Role Focus**: Product Strategy perspective

## Discussion Points Analysis
[Address each point from guidance-specification.md with product management expertise]

### Core Requirements (from framework)
[Product strategy perspective on user needs and requirements]

### Technical Considerations (from framework)
[Business and technical feasibility considerations]

### User Experience Factors (from framework)
[User value proposition and market positioning analysis]

### Implementation Challenges (from framework)
[Business execution and go-to-market considerations]

### Success Metrics (from framework)
[Product success metrics and business KPIs]

## Product Strategy Specific Recommendations
[Role-specific product management strategies and business solutions]

---
*Generated by product-manager analysis addressing structured framework*
```

## 🔄 **Session Integration**

### Completion Status Update
```json
{
  "product_manager": {
    "status": "completed",
    "framework_addressed": true,
    "output_location": ".workflow/WFS-{session}/.brainstorming/product-manager/analysis.md",
    "framework_reference": "@../guidance-specification.md"
  }
}
```

### Integration Points
- **Framework Reference**: @../guidance-specification.md for structured discussion points
- **Cross-Role Synthesis**: Product strategy insights available for synthesis-report.md integration
- **Agent Autonomy**: Independent execution with framework guidance
---
name: product-owner
description: Generate or update product-owner/analysis.md addressing guidance-specification discussion points from the product ownership perspective
argument-hint: "optional topic - uses existing framework if available"
allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*)
---

## 🎯 **Product Owner Analysis Generator**

### Purpose
**Specialized command for generating product-owner/analysis.md** that addresses guidance-specification.md discussion points from the product backlog and feature prioritization perspective. Creates or updates the role-specific analysis with framework references.

### Core Function
- **Framework-based Analysis**: Address each discussion point in guidance-specification.md
- **Product Backlog Focus**: Feature prioritization, user stories, and acceptance criteria
- **Update Mechanism**: Create a new analysis.md or update an existing one
- **Agent Delegation**: Use conceptual-planning-agent for analysis generation

### Analysis Scope
- **Backlog Management**: User story creation, refinement, and prioritization
- **Stakeholder Alignment**: Requirements gathering, value definition, and expectation management
- **Feature Prioritization**: ROI analysis, the MoSCoW method, and value-driven delivery (see the sketch below)
- **Acceptance Criteria**: Definition of Done, acceptance testing, and quality standards
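To ground the MoSCoW reference, a minimal sketch of bucketing a backlog; the stories and bucket assignments are hypothetical:

```javascript
// MoSCoW buckets: Must, Should, Could, Won't (this time).
const backlog = [
  { story: 'User can reset password', moscow: 'must' },
  { story: 'Dark mode',               moscow: 'could' },
  { story: 'Export to CSV',           moscow: 'should' },
  { story: 'AI story generator',      moscow: 'wont' },
];

// Group the backlog by bucket, in descending priority order.
const order = ['must', 'should', 'could', 'wont'];
const grouped = order.map(bucket => ({
  bucket,
  stories: backlog.filter(item => item.moscow === bucket).map(item => item.story),
}));

console.log(grouped);
```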
## ⚙️ **Execution Protocol**

### Phase 1: Session & Framework Detection
```bash
# Check active session and framework
CHECK: .workflow/.active-* marker files
IF active_session EXISTS:
    session_id = get_active_session()
    brainstorm_dir = .workflow/WFS-{session}/.brainstorming/

    CHECK: brainstorm_dir/guidance-specification.md
    IF EXISTS:
        framework_mode = true
        load_framework = true
    ELSE:
        IF topic_provided:
            framework_mode = false  # Create analysis without framework
        ELSE:
            ERROR: "No framework found and no topic provided"
```

### Phase 2: Analysis Mode Detection
```bash
# Determine execution mode
IF framework_mode == true:
    mode = "framework_based_analysis"
    topic_ref = load_framework_topic()
    discussion_points = extract_framework_points()
ELSE:
    mode = "standalone_analysis"
    topic_ref = provided_topic
    discussion_points = generate_basic_structure()
```

### Phase 3: Agent Execution with Flow Control
**Framework-Based Analysis Generation**

```bash
Task(conceptual-planning-agent): "
[FLOW_CONTROL]

Execute product-owner analysis for existing topic framework

## Context Loading
ASSIGNED_ROLE: product-owner
OUTPUT_LOCATION: .workflow/WFS-{session}/.brainstorming/product-owner/
ANALYSIS_MODE: {framework_mode ? \"framework_based\" : \"standalone\"}

## Flow Control Steps
1. **load_topic_framework**
   - Action: Load structured topic discussion framework
   - Command: Read(.workflow/WFS-{session}/.brainstorming/guidance-specification.md)
   - Output: topic_framework_content

2. **load_role_template**
   - Action: Load product-owner planning template
   - Command: bash(cat ~/.claude/workflows/cli-templates/planning-roles/product-owner.md)
   - Output: role_template_guidelines

3. **load_session_metadata**
   - Action: Load session metadata and existing context
   - Command: Read(.workflow/WFS-{session}/workflow-session.json)
   - Output: session_context

## Analysis Requirements
**Framework Reference**: Address all discussion points in guidance-specification.md from the product backlog and feature prioritization perspective
**Role Focus**: Backlog management, stakeholder alignment, feature prioritization, acceptance criteria
**Structured Approach**: Create analysis.md addressing framework discussion points
**Template Integration**: Apply role template guidelines within the framework structure

## Expected Deliverables
1. **analysis.md**: Comprehensive product ownership analysis addressing all framework discussion points
2. **Framework Reference**: Include @../guidance-specification.md reference in the analysis

## Completion Criteria
- Address each discussion point from guidance-specification.md with product ownership expertise
- Provide actionable user stories and acceptance criteria definitions
- Include feature prioritization and stakeholder alignment strategies
- Reference the framework document using @ notation for integration
"
```

## 📋 **TodoWrite Integration**

### Workflow Progress Tracking
```javascript
TodoWrite({
  todos: [
    {
      content: "Detect active session and locate topic framework",
      status: "in_progress",
      activeForm: "Detecting session and framework"
    },
    {
      content: "Load guidance-specification.md and session metadata for context",
      status: "pending",
      activeForm: "Loading framework and session context"
    },
    {
      content: "Execute product-owner analysis using conceptual-planning-agent with FLOW_CONTROL",
      status: "pending",
      activeForm: "Executing product-owner framework analysis"
    },
    {
      content: "Generate analysis.md addressing all framework discussion points",
      status: "pending",
      activeForm: "Generating structured product-owner analysis"
    },
    {
      content: "Update workflow-session.json with product-owner completion status",
      status: "pending",
      activeForm: "Updating session metadata"
    }
  ]
});
```

## 📊 **Output Structure**

### Framework-Based Analysis
```
.workflow/WFS-{session}/.brainstorming/product-owner/
└── analysis.md    # Structured analysis addressing guidance-specification.md discussion points
```

### Analysis Document Structure
```markdown
# Product Owner Analysis: [Topic from Framework]

## Framework Reference
**Topic Framework**: @../guidance-specification.md
**Role Focus**: Product Backlog & Feature Prioritization perspective

## Discussion Points Analysis
[Address each point from guidance-specification.md with product ownership expertise]

### Core Requirements (from framework)
[User story formulation and backlog refinement perspective]

### Technical Considerations (from framework)
[Technical feasibility and implementation sequencing considerations]

### User Experience Factors (from framework)
[User value definition and acceptance criteria analysis]

### Implementation Challenges (from framework)
[Sprint planning, dependency management, and delivery strategies]

### Success Metrics (from framework)
[Feature adoption, value delivery metrics, and stakeholder satisfaction indicators]

## Product Owner Specific Recommendations
[Role-specific backlog management and feature prioritization strategies]

---
*Generated by product-owner analysis addressing structured framework*
```

## 🔄 **Session Integration**

### Completion Status Update
```json
{
  "product_owner": {
    "status": "completed",
    "framework_addressed": true,
    "output_location": ".workflow/WFS-{session}/.brainstorming/product-owner/analysis.md",
    "framework_reference": "@../guidance-specification.md"
  }
}
```

### Integration Points
- **Framework Reference**: @../guidance-specification.md for structured discussion points
- **Cross-Role Synthesis**: Product ownership insights available for synthesis-report.md integration
- **Agent Autonomy**: Independent execution with framework guidance
**New file**: `.claude/commands/workflow/brainstorm/role-analysis.md` (705 lines)
---
name: role-analysis
description: Unified role-specific analysis generation with interactive context gathering and incremental updates
argument-hint: "[role-name] [--session session-id] [--update] [--include-questions] [--skip-questions]"
allowed-tools: Task(conceptual-planning-agent), AskUserQuestion(*), TodoWrite(*), Read(*), Write(*), Edit(*), Glob(*)
---

## 🎯 **Unified Role Analysis Generator**

### Purpose
**Unified command for generating and updating role-specific analysis** with interactive context gathering, framework alignment, and incremental update support. Replaces 9 individual role commands with a single parameterized workflow.

### Core Function
- **Multi-Role Support**: A single command supports all 9 brainstorming roles
- **Interactive Context**: Dynamic question generation based on role and framework
- **Incremental Updates**: Merge new insights into existing analyses
- **Framework Alignment**: Address guidance-specification.md discussion points
- **Agent Delegation**: Use conceptual-planning-agent with role-specific templates

### Supported Roles

| Role ID | Title | Focus Area | Context Questions |
|---------|-------|------------|-------------------|
| `ux-expert` | UX专家 (UX Expert) | User research, information architecture, user journey | 4 |
| `ui-designer` | UI设计师 (UI Designer) | Visual design, high-fidelity mockups, design systems | 4 |
| `system-architect` | 系统架构师 (System Architect) | Technical architecture, scalability, integration patterns | 5 |
| `product-manager` | 产品经理 (Product Manager) | Product strategy, roadmap, prioritization | 4 |
| `product-owner` | 产品负责人 (Product Owner) | Backlog management, user stories, acceptance criteria | 4 |
| `scrum-master` | 敏捷教练 (Scrum Master) | Process facilitation, impediment removal, team dynamics | 3 |
| `subject-matter-expert` | 领域专家 (Subject Matter Expert) | Domain knowledge, business rules, compliance | 4 |
| `data-architect` | 数据架构师 (Data Architect) | Data models, storage strategies, data flow | 5 |
| `api-designer` | API设计师 (API Designer) | API contracts, versioning, integration patterns | 4 |

---

## 📋 **Usage**

```bash
# Generate a new analysis with interactive context
/workflow:brainstorm:role-analysis ux-expert

# Generate against an existing framework, with context questions
/workflow:brainstorm:role-analysis system-architect --session WFS-xxx --include-questions

# Update an existing analysis (incremental merge)
/workflow:brainstorm:role-analysis ui-designer --session WFS-xxx --update

# Quick generation (skip interactive context)
/workflow:brainstorm:role-analysis product-manager --session WFS-xxx --skip-questions
```

---

## ⚙️ **Execution Protocol**

### Phase 1: Detection & Validation

**Step 1.1: Role Validation**
```bash
VALIDATE role_name IN [
    ux-expert, ui-designer, system-architect, product-manager,
    product-owner, scrum-master, subject-matter-expert,
    data-architect, api-designer
]
IF invalid:
    ERROR: "Unknown role: {role_name}. Use one of: ux-expert, ui-designer, ..."
    EXIT
```

**Step 1.2: Session Detection**
```bash
IF --session PROVIDED:
    session_id = --session
    brainstorm_dir = .workflow/active/{session_id}/.brainstorming/
ELSE:
    FIND .workflow/active/WFS-*/
    IF multiple:
        PROMPT user to select
    ELSE IF single:
        USE existing
    ELSE:
        ERROR: "No active session. Run /workflow:brainstorm:artifacts first"
        EXIT

VALIDATE brainstorm_dir EXISTS
```
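A minimal sketch of the auto-detection branch, assuming Node-style filesystem access; the function name is illustrative:

```javascript
const fs = require('fs');
const path = require('path');

// Auto-detect the active session when --session is omitted.
function detectSession(explicitSession) {
  if (explicitSession) return explicitSession;

  const activeRoot = '.workflow/active';
  const sessions = fs.readdirSync(activeRoot)
    .filter(name => name.startsWith('WFS-'))
    .filter(name => fs.statSync(path.join(activeRoot, name)).isDirectory());

  if (sessions.length === 0) {
    throw new Error('No active session. Run /workflow:brainstorm:artifacts first');
  }
  if (sessions.length > 1) {
    // A real implementation would prompt the user; here we just surface the choices.
    throw new Error(`Multiple active sessions, pick one: ${sessions.join(', ')}`);
  }
  return sessions[0];
}
```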
**Step 1.3: Framework Detection**
```bash
framework_file = {brainstorm_dir}/guidance-specification.md
IF framework_file EXISTS:
    framework_mode = true
    LOAD framework_content
ELSE:
    WARN: "No framework found - will create standalone analysis"
    framework_mode = false
```

**Step 1.4: Update Mode Detection**
```bash
existing_analysis = {brainstorm_dir}/{role_name}/analysis*.md
IF --update FLAG OR existing_analysis EXISTS:
    update_mode = true
    IF --update NOT PROVIDED:
        ASK: "Analysis exists. Update or regenerate?"
        OPTIONS: ["Incremental update", "Full regenerate", "Cancel"]
ELSE:
    update_mode = false
```

### Phase 2: Interactive Context Gathering

**Trigger Conditions**:
- Default: Always ask unless `--skip-questions` is provided
- `--include-questions`: Force context gathering even if an analysis exists
- `--skip-questions`: Skip all interactive questions

**Step 2.1: Load Role Configuration**
```javascript
const roleConfig = {
  'ux-expert': {
    title: 'UX专家',
    focus_area: 'User research, information architecture, user journey',
    question_categories: ['User Intent', 'Requirements', 'UX'],
    question_count: 4,
    template: '~/.claude/workflows/cli-templates/planning-roles/ux-expert.md'
  },
  'ui-designer': {
    title: 'UI设计师',
    focus_area: 'Visual design, high-fidelity mockups, design systems',
    question_categories: ['Requirements', 'UX', 'Feasibility'],
    question_count: 4,
    template: '~/.claude/workflows/cli-templates/planning-roles/ui-designer.md'
  },
  'system-architect': {
    title: '系统架构师',
    focus_area: 'Technical architecture, scalability, integration patterns',
    question_categories: ['Scale & Performance', 'Technical Constraints', 'Architecture Complexity', 'Non-Functional Requirements'],
    question_count: 5,
    template: '~/.claude/workflows/cli-templates/planning-roles/system-architect.md'
  },
  'product-manager': {
    title: '产品经理',
    focus_area: 'Product strategy, roadmap, prioritization',
    question_categories: ['User Intent', 'Requirements', 'Process'],
    question_count: 4,
    template: '~/.claude/workflows/cli-templates/planning-roles/product-manager.md'
  },
  'product-owner': {
    title: '产品负责人',
    focus_area: 'Backlog management, user stories, acceptance criteria',
    question_categories: ['Requirements', 'Decisions', 'Process'],
    question_count: 4,
    template: '~/.claude/workflows/cli-templates/planning-roles/product-owner.md'
  },
  'scrum-master': {
    title: '敏捷教练',
    focus_area: 'Process facilitation, impediment removal, team dynamics',
    question_categories: ['Process', 'Risk', 'Decisions'],
    question_count: 3,
    template: '~/.claude/workflows/cli-templates/planning-roles/scrum-master.md'
  },
  'subject-matter-expert': {
    title: '领域专家',
    focus_area: 'Domain knowledge, business rules, compliance',
    question_categories: ['Requirements', 'Feasibility', 'Terminology'],
    question_count: 4,
    template: '~/.claude/workflows/cli-templates/planning-roles/subject-matter-expert.md'
  },
  'data-architect': {
    title: '数据架构师',
    focus_area: 'Data models, storage strategies, data flow',
    question_categories: ['Architecture', 'Scale & Performance', 'Technical Constraints', 'Feasibility'],
    question_count: 5,
    template: '~/.claude/workflows/cli-templates/planning-roles/data-architect.md'
  },
  'api-designer': {
    title: 'API设计师',
    focus_area: 'API contracts, versioning, integration patterns',
    question_categories: ['Architecture', 'Requirements', 'Feasibility', 'Decisions'],
    question_count: 4,
    template: '~/.claude/workflows/cli-templates/planning-roles/api-designer.md'
  }
};

config = roleConfig[role_name];
```

**Step 2.2: Generate Role-Specific Questions**

**Question Category Taxonomy** (from synthesis.md; 13 categories):

| Category | Focus | Example Question Pattern |
|----------|-------|--------------------------|
| User Intent | User goals (用户目标) | "该分析的核心目标是什么?" (What is the core goal of this analysis?) |
| Requirements | Requirement refinement (需求细化) | "需求的优先级如何排序?" (How should the requirements be prioritized?) |
| Architecture | Architecture decisions (架构决策) | "技术栈的选择考量?" (What drives the technology stack choice?) |
| UX | User experience (用户体验) | "交互复杂度的取舍?" (What trade-off on interaction complexity?) |
| Feasibility | Feasibility (可行性) | "资源约束下的实现范围?" (What scope is achievable under resource constraints?) |
| Risk | Risk management (风险管理) | "风险容忍度是多少?" (How much risk is tolerable?) |
| Process | Process norms (流程规范) | "开发迭代的节奏?" (What is the development iteration cadence?) |
| Decisions | Decision confirmation (决策确认) | "冲突的解决方案?" (How should conflicts be resolved?) |
| Terminology | Terminology alignment (术语统一) | "统一使用哪个术语?" (Which term should be standardized?) |
| Scale & Performance | Performance & scaling (性能扩展) | "预期的负载和性能要求?" (What are the expected load and performance requirements?) |
| Technical Constraints | Technical constraints (技术约束) | "现有技术栈的限制?" (What limits does the existing stack impose?) |
| Architecture Complexity | Architecture complexity (架构复杂度) | "架构的复杂度权衡?" (What complexity trade-offs in the architecture?) |
| Non-Functional Requirements | Non-functional requirements (非功能需求) | "可用性和可维护性要求?" (What availability and maintainability requirements?) |

**Question Generation Algorithm**:
```javascript
async function generateQuestions(role_name, framework_content) {
  const config = roleConfig[role_name];
  const questions = [];

  // Parse framework for keywords
  const keywords = extractKeywords(framework_content);

  // Generate category-specific questions
  for (const category of config.question_categories) {
    const question = generateCategoryQuestion(category, keywords, role_name);
    questions.push(question);
  }

  return questions.slice(0, config.question_count);
}
```
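The two helpers above are not defined in this file; a minimal sketch of what they might look like. The extraction heuristic, question template, and option labels are illustrative assumptions (the question strings stay in Chinese, per the quality rules below):

```javascript
// Naive keyword extraction: collect headings and bolded terms from the framework.
function extractKeywords(frameworkContent) {
  const headings = [...frameworkContent.matchAll(/^#+\s+(.+)$/gm)].map(m => m[1]);
  const bolded = [...frameworkContent.matchAll(/\*\*([^*]+)\*\*/g)].map(m => m[1]);
  return [...new Set([...headings, ...bolded])];
}

// Build one closed-option question for a category, seeded with a framework keyword.
function generateCategoryQuestion(category, keywords, roleName) {
  const topic = keywords[0] ?? roleName;
  return {
    category,
    // "Around <topic>, what does this role most need to clarify?"
    question: `[${category}] 围绕「${topic}」,该角色最需要澄清什么?`,
    options: [
      { label: '范围', description: `限定 ${topic} 的边界和优先级` },   // scope
      { label: '约束', description: `明确 ${topic} 的技术/业务约束` }, // constraints
      { label: '指标', description: `定义 ${topic} 的成功衡量标准` },  // metrics
    ],
  };
}
```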
**Step 2.3: Multi-Round Question Execution**

```javascript
const BATCH_SIZE = 4;
const user_context = {};

for (let i = 0; i < questions.length; i += BATCH_SIZE) {
  const batch = questions.slice(i, i + BATCH_SIZE);
  const currentRound = Math.floor(i / BATCH_SIZE) + 1;
  const totalRounds = Math.ceil(questions.length / BATCH_SIZE);

  console.log(`\n[Round ${currentRound}/${totalRounds}] ${config.title} 上下文询问\n`);

  // Capture the responses so they can be stored before the next round.
  const responses = AskUserQuestion({
    questions: batch.map(q => ({
      question: q.question,
      header: q.category.substring(0, 12),
      multiSelect: false,
      options: q.options.map(opt => ({
        label: opt.label,
        description: opt.description
      }))
    }))
  });

  // Store responses before the next round
  for (const answer of responses) {
    user_context[answer.question] = {
      answer: answer.selected,
      category: answer.category,
      timestamp: new Date().toISOString()
    };
  }
}

// Save the gathered context to a file
Write(
  `${brainstorm_dir}/${role_name}/${role_name}-context.md`,
  formatUserContext(user_context)
);
```
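`formatUserContext` is referenced above but not defined in this file; a minimal sketch of the markdown it might emit:

```javascript
// Render the gathered Q&A map as a context markdown document.
function formatUserContext(userContext) {
  const entries = Object.entries(userContext).map(([question, meta]) => [
    `- **Q**: ${question} (Category: ${meta.category})`,
    `  **A**: ${meta.answer}`,
    `  _Answered_: ${meta.timestamp}`,
  ].join('\n'));
  return `# User Context\n\n${entries.join('\n\n')}\n`;
}
```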
**Question Quality Rules** (from artifacts.md):

**MUST Include**:
- ✅ All questions in Chinese (用中文提问)
- ✅ A business scenario as the premise of each question (业务场景作为问题前提)
- ✅ The business impact of each technical option (技术选项的业务影响说明)
- ✅ Quantified targets and constraints (量化指标和约束条件)

**MUST Avoid**:
- ❌ Pure technology choices with no business context (纯技术选型无业务上下文)
- ❌ Overly abstract, generic questions (过度抽象的通用问题)
- ❌ Repeated questions detached from the framework (脱离框架的重复询问)

### Phase 3: Agent Execution

**Step 3.1: Load Session Metadata**
```bash
session_metadata = Read(.workflow/active/{session_id}/workflow-session.json)
original_topic = session_metadata.topic
selected_roles = session_metadata.selected_roles
```

**Step 3.2: Prepare Agent Context**
```javascript
const agentContext = {
  role_name: role_name,
  role_config: roleConfig[role_name],
  output_location: `${brainstorm_dir}/${role_name}/`,
  framework_mode: framework_mode,
  framework_path: framework_mode ? `${brainstorm_dir}/guidance-specification.md` : null,
  update_mode: update_mode,
  user_context: user_context,
  original_topic: original_topic,
  session_id: session_id
};
```

**Step 3.3: Execute Conceptual Planning Agent**

**Framework-Based Analysis** (when guidance-specification.md exists):
```javascript
Task({
  subagent_type: "conceptual-planning-agent",
  run_in_background: false,
  description: `Generate ${role_name} analysis`,
  prompt: `
[FLOW_CONTROL]

Execute ${role_name} analysis for existing topic framework

## Context Loading
ASSIGNED_ROLE: ${role_name}
OUTPUT_LOCATION: ${agentContext.output_location}
ANALYSIS_MODE: ${framework_mode ? "framework_based" : "standalone"}
UPDATE_MODE: ${update_mode}

## Flow Control Steps
1. **load_topic_framework**
   - Action: Load structured topic discussion framework
   - Command: Read(${agentContext.framework_path})
   - Output: topic_framework_content

2. **load_role_template**
   - Action: Load ${role_name} planning template
   - Command: Read(${roleConfig[role_name].template})
   - Output: role_template_guidelines

3. **load_session_metadata**
   - Action: Load session metadata and user intent
   - Command: Read(.workflow/active/${session_id}/workflow-session.json)
   - Output: session_context

4. **load_user_context** (if exists)
   - Action: Load interactive context responses
   - Command: Read(${brainstorm_dir}/${role_name}/${role_name}-context.md)
   - Output: user_context_answers

5. **${update_mode ? 'load_existing_analysis' : 'skip'}**
   ${update_mode ? `
   - Action: Load existing analysis for incremental update
   - Command: Read(${brainstorm_dir}/${role_name}/analysis.md)
   - Output: existing_analysis_content
   ` : ''}

## Analysis Requirements
**Primary Reference**: The original user prompt from workflow-session.json is authoritative
**Framework Source**: Address all discussion points in guidance-specification.md from the ${role_name} perspective
**User Context Integration**: Incorporate interactive Q&A responses into the analysis
**Role Focus**: ${roleConfig[role_name].focus_area}
**Template Integration**: Apply role template guidelines within the framework structure

## Expected Deliverables
1. **analysis.md** (main document, optionally with analysis-{slug}.md sub-documents)
2. **Framework Reference**: @../guidance-specification.md (if framework_mode)
3. **User Context Reference**: @./${role_name}-context.md (if user context exists)
4. **User Intent Alignment**: Validate against session_context

## Update Requirements (if UPDATE_MODE)
- **Preserve Structure**: Maintain the existing analysis structure
- **Add "Clarifications" Section**: Document new user context with a timestamp
- **Merge Insights**: Integrate new perspectives without removing existing content
- **Resolve Conflicts**: If new context contradicts the existing analysis, document both and recommend a resolution

## Completion Criteria
- Address each discussion point from guidance-specification.md with ${role_name} expertise
- Provide actionable recommendations from the ${role_name} perspective within the analysis files
- All output files MUST start with the "analysis" prefix (no recommendations.md or other naming)
- Reference the framework document using @ notation for integration
- Update workflow-session.json with completion status
`
});
```

### Phase 4: Validation & Finalization

**Step 4.1: Validate Output**
```bash
VERIFY EXISTS: ${brainstorm_dir}/${role_name}/analysis.md
VERIFY CONTAINS: "@../guidance-specification.md" (if framework_mode)
IF user_context EXISTS:
    VERIFY CONTAINS: "@./${role_name}-context.md" OR "## Clarifications" section
```
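The same checks as a minimal executable sketch, assuming Node-style `fs`; the reference strings mirror the spec above:

```javascript
const fs = require('fs');

// Verify the agent produced a well-formed analysis document.
function validateOutput(brainstormDir, roleName, { frameworkMode, hasUserContext }) {
  const analysisPath = `${brainstormDir}/${roleName}/analysis.md`;
  if (!fs.existsSync(analysisPath)) {
    throw new Error(`Missing ${analysisPath}`);
  }
  const body = fs.readFileSync(analysisPath, 'utf8');
  if (frameworkMode && !body.includes('@../guidance-specification.md')) {
    throw new Error('Analysis lacks the framework reference');
  }
  if (hasUserContext &&
      !body.includes(`@./${roleName}-context.md`) &&
      !body.includes('## Clarifications')) {
    throw new Error('Analysis lacks the user-context reference');
  }
}
```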
**Step 4.2: Update Session Metadata**
```json
{
  "phases": {
    "BRAINSTORM": {
      "${role_name}": {
        "status": "${update_mode ? 'updated' : 'completed'}",
        "completed_at": "timestamp",
        "framework_addressed": true,
        "context_gathered": "${user_context ? true : false}",
        "output_location": "${brainstorm_dir}/${role_name}/analysis.md",
        "update_history": [
          {
            "timestamp": "ISO8601",
            "mode": "${update_mode ? 'incremental' : 'initial'}",
            "context_questions": "${question_count}"
          }
        ]
      }
    }
  }
}
```

**Step 4.3: Completion Report**
```markdown
✅ ${roleConfig[role_name].title} Analysis Complete

**Output**: ${brainstorm_dir}/${role_name}/analysis.md
**Mode**: ${update_mode ? 'Incremental Update' : 'New Generation'}
**Framework**: ${framework_mode ? '✓ Aligned' : '✗ Standalone'}
**Context Questions**: ${question_count} answered

${update_mode ? '
**Changes**:
- Added "Clarifications" section with new user context
- Merged new insights into existing sections
- Resolved conflicts with framework alignment
' : ''}

**Next Steps**:
${selected_roles.length > 1 ? `
- Continue with other roles: ${selected_roles.filter(r => r !== role_name).join(', ')}
- Run synthesis: /workflow:brainstorm:synthesis --session ${session_id}
` : `
- Synthesize insights: /workflow:brainstorm:synthesis --session ${session_id}
- Generate plan: /workflow:plan --session ${session_id}
`}
```

---

## 📋 **TodoWrite Integration**

### Workflow Progress Tracking

```javascript
TodoWrite({
  todos: [
    {
      content: "Phase 1: Detect session and validate role configuration",
      status: "in_progress",
      activeForm: "Detecting session and role"
    },
    {
      content: "Phase 2: Interactive context gathering with AskUserQuestion",
      status: "pending",
      activeForm: "Gathering user context"
    },
    {
      content: "Phase 3: Execute conceptual-planning-agent for role analysis",
      status: "pending",
      activeForm: "Executing agent analysis"
    },
    {
      content: "Phase 4: Validate output and update session metadata",
      status: "pending",
      activeForm: "Finalizing and validating"
    }
  ]
});
```

---

## 📊 **Output Structure**

### Directory Layout

```
.workflow/active/WFS-{session}/.brainstorming/
├── guidance-specification.md          # Framework (if exists)
└── {role-name}/
    ├── {role-name}-context.md         # Interactive Q&A responses
    ├── analysis.md                    # Main analysis (REQUIRED)
    └── analysis-{slug}.md             # Section documents (optional, max 5)
```

### Analysis Document Structure (New Generation)

```markdown
# ${roleConfig[role_name].title} Analysis: [Topic from Framework]

## Framework Reference
**Topic Framework**: @../guidance-specification.md
**Role Focus**: ${roleConfig[role_name].focus_area}
**User Context**: @./${role_name}-context.md

## User Context Summary
**Context Gathered**: ${question_count} questions answered
**Categories**: ${question_categories.join(', ')}

${user_context ? formatContextSummary(user_context) : ''}

## Discussion Points Analysis
[Address each point from guidance-specification.md with ${role_name} expertise]

### Core Requirements (from framework)
[Role-specific perspective on requirements]

### Technical Considerations (from framework)
[Role-specific technical analysis]

### User Experience Factors (from framework)
[Role-specific UX considerations]

### Implementation Challenges (from framework)
[Role-specific challenges and solutions]

### Success Metrics (from framework)
[Role-specific metrics and KPIs]

## ${roleConfig[role_name].title} Specific Recommendations
[Role-specific actionable strategies]

---
*Generated by ${role_name} analysis addressing structured framework*
*Context gathered: ${new Date().toISOString()}*
```

### Analysis Document Structure (Incremental Update)

```markdown
# ${roleConfig[role_name].title} Analysis: [Topic]

## Framework Reference
[Existing content preserved]

## Clarifications
### Session ${new Date().toISOString().split('T')[0]}
${Object.entries(user_context).map(([q, a]) => `
- **Q**: ${q} (Category: ${a.category})
  **A**: ${a.answer}
`).join('\n')}

## User Context Summary
[Updated with new context]

## Discussion Points Analysis
[Existing content enhanced with new insights]

[Remaining sections updated based on the clarifications]
```

---

## 🔄 **Integration with Other Commands**

### Called By
- `/workflow:brainstorm:auto-parallel` (Phase 2 - parallel role execution)
- Manual invocation for single-role analysis

### Calls To
- `conceptual-planning-agent` (agent execution)
- `AskUserQuestion` (interactive context gathering)

### Coordinates With
- `/workflow:brainstorm:artifacts` (creates the framework for role analysis)
- `/workflow:brainstorm:synthesis` (reads role analyses for integration)

---

## ✅ **Quality Assurance**

### Required Analysis Elements
- [ ] Framework discussion points addressed (if framework_mode)
- [ ] User context integrated (if context gathered)
- [ ] Role template guidelines applied
- [ ] Output files follow the naming convention (analysis*.md only)
- [ ] Framework referenced using @ notation
- [ ] Session metadata updated

### Context Quality
- [ ] Questions in Chinese with business context
- [ ] Options include technical trade-offs
- [ ] Categories aligned with the role focus
- [ ] No generic questions unrelated to the framework

### Update Quality (if update_mode)
- [ ] "Clarifications" section added with a timestamp
- [ ] New insights merged without content loss
- [ ] Conflicts documented and resolved
- [ ] Framework alignment maintained

---

## 🎛️ **Command Parameters**

### Required Parameters
- `[role-name]`: Role identifier (ux-expert, ui-designer, system-architect, etc.)

### Optional Parameters
- `--session [session-id]`: Specify the brainstorming session (auto-detect if omitted)
- `--update`: Force incremental update mode (auto-detected if an analysis exists)
- `--include-questions`: Force context gathering even if an analysis exists
- `--skip-questions`: Skip all interactive context gathering
- `--style-skill [package]`: For ui-designer only, load a style SKILL package

### Parameter Combinations

| Scenario | Command | Behavior |
|----------|---------|----------|
| New analysis | `role-analysis ux-expert` | Generate + ask context questions |
| Quick generation | `role-analysis ux-expert --skip-questions` | Generate without context |
| Update existing | `role-analysis ux-expert --update` | Ask clarifications + merge |
| Force questions | `role-analysis ux-expert --include-questions` | Ask even if analysis exists |
| Specific session | `role-analysis ux-expert --session WFS-xxx` | Target a specific session |

---

## 🚫 **Error Handling**

### Invalid Role Name
```
ERROR: Unknown role: "ui-expert"
Valid roles: ux-expert, ui-designer, system-architect, product-manager,
             product-owner, scrum-master, subject-matter-expert,
             data-architect, api-designer
```

### No Active Session
```
ERROR: No active brainstorming session found
Run: /workflow:brainstorm:artifacts "[topic]" to create a session
```

### Missing Framework (with warning)
```
WARN: No guidance-specification.md found
Generating standalone analysis without framework alignment
Recommend: Run /workflow:brainstorm:artifacts first for better results
```

### Agent Execution Failure
```
ERROR: Conceptual planning agent failed
Check: ${brainstorm_dir}/${role_name}/error.log
Action: Retry with --skip-questions or check framework validity
```

---

## 🔧 **Advanced Usage**

### Batch Role Generation (via auto-parallel)
```bash
# This command handles multiple roles in parallel
/workflow:brainstorm:auto-parallel "topic" --count 3
# → Internally calls role-analysis for each selected role
```

### Manual Multi-Role Workflow
```bash
# 1. Create the framework
/workflow:brainstorm:artifacts "Build real-time collaboration platform" --count 3

# 2. Generate each role with context
/workflow:brainstorm:role-analysis system-architect --include-questions
/workflow:brainstorm:role-analysis ui-designer --include-questions
/workflow:brainstorm:role-analysis product-manager --include-questions

# 3. Synthesize insights
/workflow:brainstorm:synthesis --session WFS-xxx
```

### Iterative Refinement
```bash
# Initial generation
/workflow:brainstorm:role-analysis ux-expert

# User reviews and wants more depth
/workflow:brainstorm:role-analysis ux-expert --update --include-questions
# → Asks clarification questions, merges new insights
```

---

## 📚 **Reference Information**

### Role Template Locations
- Templates: `~/.claude/workflows/cli-templates/planning-roles/`
- Format: `{role-name}.md` (e.g., `ux-expert.md`, `system-architect.md`)

### Related Commands
- `/workflow:brainstorm:artifacts` - Create framework and select roles
- `/workflow:brainstorm:auto-parallel` - Parallel multi-role execution
- `/workflow:brainstorm:synthesis` - Integrate role analyses
- `/workflow:plan` - Generate implementation plan from synthesis

### Context Package
- Location: `.workflow/active/WFS-{session}/.process/context-package.json`
- Used by: `context-search-agent` (Phase 0 of artifacts)
- Contains: Project context, tech stack, conflict risks
---
name: scrum-master
description: Generate or update scrum-master/analysis.md addressing guidance-specification discussion points from the Agile process perspective
argument-hint: "optional topic - uses existing framework if available"
allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*)
---

## 🎯 **Scrum Master Analysis Generator**

### Purpose
**Specialized command for generating scrum-master/analysis.md** that addresses guidance-specification.md discussion points from the agile process and team collaboration perspective. Creates or updates the role-specific analysis with framework references.

### Core Function
- **Framework-based Analysis**: Address each discussion point in guidance-specification.md
- **Agile Process Focus**: Sprint planning, team dynamics, and delivery optimization
- **Update Mechanism**: Create a new analysis.md or update an existing one
- **Agent Delegation**: Use conceptual-planning-agent for analysis generation

### Analysis Scope
- **Sprint Planning**: Task breakdown, estimation, and iteration planning
- **Team Collaboration**: Communication patterns, impediment removal, and facilitation
- **Process Optimization**: Agile ceremonies, retrospectives, and continuous improvement
- **Delivery Management**: Velocity tracking, burndown analysis, and release planning (see the sketch below)
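To ground the velocity-tracking reference, a minimal sketch of a rolling-average forecast; the sprint history and backlog size are hypothetical:

```javascript
// Rolling velocity: average completed story points over the last N sprints.
function rollingVelocity(sprints, window = 3) {
  const recent = sprints.slice(-window);
  const total = recent.reduce((sum, s) => sum + s.completedPoints, 0);
  return total / recent.length;
}

// Hypothetical history; forecast remaining sprints for a 60-point backlog.
const history = [
  { sprint: 1, completedPoints: 18 },
  { sprint: 2, completedPoints: 22 },
  { sprint: 3, completedPoints: 20 },
];
const velocity = rollingVelocity(history);                 // 20
console.log(Math.ceil(60 / velocity), 'sprints remaining'); // 3 sprints remaining
```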
## ⚙️ **Execution Protocol**
|
||||
|
||||
### Phase 1: Session & Framework Detection
|
||||
```bash
|
||||
# Check active session and framework
|
||||
CHECK: .workflow/.active-* marker files
|
||||
IF active_session EXISTS:
|
||||
session_id = get_active_session()
|
||||
brainstorm_dir = .workflow/WFS-{session}/.brainstorming/
|
||||
|
||||
CHECK: brainstorm_dir/guidance-specification.md
|
||||
IF EXISTS:
|
||||
framework_mode = true
|
||||
load_framework = true
|
||||
ELSE:
|
||||
IF topic_provided:
|
||||
framework_mode = false # Create analysis without framework
|
||||
ELSE:
|
||||
ERROR: "No framework found and no topic provided"
|
||||
```
|
||||
|
||||
### Phase 2: Analysis Mode Detection
|
||||
```bash
|
||||
# Determine execution mode
|
||||
IF framework_mode == true:
|
||||
mode = "framework_based_analysis"
|
||||
topic_ref = load_framework_topic()
|
||||
discussion_points = extract_framework_points()
|
||||
ELSE:
|
||||
mode = "standalone_analysis"
|
||||
topic_ref = provided_topic
|
||||
discussion_points = generate_basic_structure()
|
||||
```
|
||||
|
||||
### Phase 3: Agent Execution with Flow Control
|
||||
**Framework-Based Analysis Generation**
|
||||
|
||||
```bash
|
||||
Task(conceptual-planning-agent): "
|
||||
[FLOW_CONTROL]
|
||||
|
||||
Execute scrum-master analysis for existing topic framework
|
||||
|
||||
## Context Loading
|
||||
ASSIGNED_ROLE: scrum-master
|
||||
OUTPUT_LOCATION: .workflow/WFS-{session}/.brainstorming/scrum-master/
|
||||
ANALYSIS_MODE: {framework_mode ? "framework_based" : "standalone"}
|
||||
|
||||
## Flow Control Steps
|
||||
1. **load_topic_framework**
|
||||
- Action: Load structured topic discussion framework
|
||||
- Command: Read(.workflow/WFS-{session}/.brainstorming/guidance-specification.md)
|
||||
- Output: topic_framework_content
|
||||
|
||||
2. **load_role_template**
|
||||
- Action: Load scrum-master planning template
|
||||
- Command: bash($(cat ~/.claude/workflows/cli-templates/planning-roles/scrum-master.md))
|
||||
- Output: role_template_guidelines
|
||||
|
||||
3. **load_session_metadata**
|
||||
- Action: Load session metadata and existing context
|
||||
- Command: Read(.workflow/WFS-{session}/workflow-session.json)
|
||||
- Output: session_context
|
||||
|
||||
## Analysis Requirements
|
||||
**Framework Reference**: Address all discussion points in guidance-specification.md from agile process and team collaboration perspective
|
||||
**Role Focus**: Sprint planning, team dynamics, process optimization, delivery management
|
||||
**Structured Approach**: Create analysis.md addressing framework discussion points
|
||||
**Template Integration**: Apply role template guidelines within framework structure
|
||||
|
||||
## Expected Deliverables
|
||||
1. **analysis.md**: Comprehensive agile process analysis addressing all framework discussion points
|
||||
2. **Framework Reference**: Include @../guidance-specification.md reference in analysis
|
||||
|
||||
## Completion Criteria
|
||||
- Address each discussion point from guidance-specification.md with scrum mastery expertise
|
||||
- Provide actionable sprint planning and team facilitation strategies
|
||||
- Include process optimization and impediment removal insights
|
||||
- Reference framework document using @ notation for integration
|
||||
"
|
||||
```

## 📋 **TodoWrite Integration**

### Workflow Progress Tracking
```javascript
TodoWrite({
  todos: [
    {
      content: "Detect active session and locate topic framework",
      status: "in_progress",
      activeForm: "Detecting session and framework"
    },
    {
      content: "Load guidance-specification.md and session metadata for context",
      status: "pending",
      activeForm: "Loading framework and session context"
    },
    {
      content: "Execute scrum-master analysis using conceptual-planning-agent with FLOW_CONTROL",
      status: "pending",
      activeForm: "Executing scrum-master framework analysis"
    },
    {
      content: "Generate analysis.md addressing all framework discussion points",
      status: "pending",
      activeForm: "Generating structured scrum-master analysis"
    },
    {
      content: "Update workflow-session.json with scrum-master completion status",
      status: "pending",
      activeForm: "Updating session metadata"
    }
  ]
});
```

## 📊 **Output Structure**

### Framework-Based Analysis
```
.workflow/WFS-{session}/.brainstorming/scrum-master/
└── analysis.md    # Structured analysis addressing guidance-specification.md discussion points
```

### Analysis Document Structure
```markdown
# Scrum Master Analysis: [Topic from Framework]

## Framework Reference
**Topic Framework**: @../guidance-specification.md
**Role Focus**: Agile Process & Team Collaboration perspective

## Discussion Points Analysis
[Address each point from guidance-specification.md with scrum mastery expertise]

### Core Requirements (from framework)
[Sprint planning and iteration breakdown perspective]

### Technical Considerations (from framework)
[Technical debt management and process considerations]

### User Experience Factors (from framework)
[User story refinement and acceptance criteria analysis]

### Implementation Challenges (from framework)
[Impediment identification and removal strategies]

### Success Metrics (from framework)
[Velocity tracking, burndown metrics, and team performance indicators]

## Scrum Master Specific Recommendations
[Role-specific agile process optimization and team facilitation strategies]

---
*Generated by scrum-master analysis addressing structured framework*
```

## 🔄 **Session Integration**

### Completion Status Update
```json
{
  "scrum_master": {
    "status": "completed",
    "framework_addressed": true,
    "output_location": ".workflow/WFS-{session}/.brainstorming/scrum-master/analysis.md",
    "framework_reference": "@../guidance-specification.md"
  }
}
```

### Integration Points
- **Framework Reference**: @../guidance-specification.md for structured discussion points
- **Cross-Role Synthesis**: Agile process insights available for synthesis-report.md integration
- **Agent Autonomy**: Independent execution with framework guidance
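
As a minimal sketch (assuming a Node-style runtime; `sessionId`, the file path, and the merge shape mirror the JSON above, everything else is illustrative), the main flow could fold this status block into `workflow-session.json` like so:

```javascript
// Illustrative sketch: merge the scrum-master completion status into
// workflow-session.json. Paths and helper names are assumptions.
const fs = require('fs');

const sessionPath = `.workflow/WFS-${sessionId}/workflow-session.json`;
const session = JSON.parse(fs.readFileSync(sessionPath, 'utf8'));

session.phases ??= {};
session.phases.BRAINSTORM ??= {};
session.phases.BRAINSTORM.scrum_master = {
  status: 'completed',
  framework_addressed: true,
  output_location: `.workflow/WFS-${sessionId}/.brainstorming/scrum-master/analysis.md`,
  framework_reference: '@../guidance-specification.md'
};

fs.writeFileSync(sessionPath, JSON.stringify(session, null, 2));
```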

---
name: subject-matter-expert
description: Generate or update subject-matter-expert/analysis.md addressing guidance-specification discussion points for domain expertise perspective
argument-hint: "optional topic - uses existing framework if available"
allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*)
---

## 🎯 **Subject Matter Expert Analysis Generator**

### Purpose
**Specialized command for generating subject-matter-expert/analysis.md** that addresses guidance-specification.md discussion points from a domain knowledge and technical expertise perspective. Creates or updates the role-specific analysis with framework references.

### Core Function
- **Framework-based Analysis**: Address each discussion point in guidance-specification.md
- **Domain Expertise Focus**: Deep technical knowledge, industry standards, and best practices
- **Update Mechanism**: Create new or update existing analysis.md
- **Agent Delegation**: Use conceptual-planning-agent for analysis generation

### Analysis Scope
- **Domain Knowledge**: Industry-specific expertise, regulatory requirements, and compliance
- **Technical Standards**: Best practices, design patterns, and architectural guidelines
- **Risk Assessment**: Technical debt, scalability concerns, and maintenance implications
- **Knowledge Transfer**: Documentation strategies, training requirements, and expertise sharing

## ⚙️ **Execution Protocol**

### Phase 1: Session & Framework Detection
```bash
# Check active session and framework
CHECK: .workflow/.active-* marker files
IF active_session EXISTS:
    session_id = get_active_session()
    brainstorm_dir = .workflow/WFS-{session}/.brainstorming/

    CHECK: brainstorm_dir/guidance-specification.md
    IF EXISTS:
        framework_mode = true
        load_framework = true
    ELSE:
        IF topic_provided:
            framework_mode = false  # Create analysis without framework
        ELSE:
            ERROR: "No framework found and no topic provided"
```

### Phase 2: Analysis Mode Detection
```bash
# Determine execution mode
IF framework_mode == true:
    mode = "framework_based_analysis"
    topic_ref = load_framework_topic()
    discussion_points = extract_framework_points()
ELSE:
    mode = "standalone_analysis"
    topic_ref = provided_topic
    discussion_points = generate_basic_structure()
```

### Phase 3: Agent Execution with Flow Control

**Framework-Based Analysis Generation**

```bash
Task(conceptual-planning-agent): "
[FLOW_CONTROL]

Execute subject-matter-expert analysis for existing topic framework

## Context Loading
ASSIGNED_ROLE: subject-matter-expert
OUTPUT_LOCATION: .workflow/WFS-{session}/.brainstorming/subject-matter-expert/
ANALYSIS_MODE: {framework_mode ? "framework_based" : "standalone"}

## Flow Control Steps
1. **load_topic_framework**
   - Action: Load structured topic discussion framework
   - Command: Read(.workflow/WFS-{session}/.brainstorming/guidance-specification.md)
   - Output: topic_framework_content

2. **load_role_template**
   - Action: Load subject-matter-expert planning template
   - Command: bash($(cat ~/.claude/workflows/cli-templates/planning-roles/subject-matter-expert.md))
   - Output: role_template_guidelines

3. **load_session_metadata**
   - Action: Load session metadata and existing context
   - Command: Read(.workflow/WFS-{session}/workflow-session.json)
   - Output: session_context

## Analysis Requirements
**Framework Reference**: Address all discussion points in guidance-specification.md from the domain expertise and technical standards perspective
**Role Focus**: Domain knowledge, technical standards, risk assessment, knowledge transfer
**Structured Approach**: Create analysis.md addressing framework discussion points
**Template Integration**: Apply role template guidelines within framework structure

## Expected Deliverables
1. **analysis.md**: Comprehensive domain expertise analysis addressing all framework discussion points
2. **Framework Reference**: Include @../guidance-specification.md reference in analysis

## Completion Criteria
- Address each discussion point from guidance-specification.md with subject matter expertise
- Provide actionable technical standards and best-practice recommendations
- Include risk assessment and compliance considerations
- Reference framework document using @ notation for integration
"
```

## 📋 **TodoWrite Integration**

### Workflow Progress Tracking
```javascript
TodoWrite({
  todos: [
    {
      content: "Detect active session and locate topic framework",
      status: "in_progress",
      activeForm: "Detecting session and framework"
    },
    {
      content: "Load guidance-specification.md and session metadata for context",
      status: "pending",
      activeForm: "Loading framework and session context"
    },
    {
      content: "Execute subject-matter-expert analysis using conceptual-planning-agent with FLOW_CONTROL",
      status: "pending",
      activeForm: "Executing subject-matter-expert framework analysis"
    },
    {
      content: "Generate analysis.md addressing all framework discussion points",
      status: "pending",
      activeForm: "Generating structured subject-matter-expert analysis"
    },
    {
      content: "Update workflow-session.json with subject-matter-expert completion status",
      status: "pending",
      activeForm: "Updating session metadata"
    }
  ]
});
```

## 📊 **Output Structure**

### Framework-Based Analysis
```
.workflow/WFS-{session}/.brainstorming/subject-matter-expert/
└── analysis.md    # Structured analysis addressing guidance-specification.md discussion points
```

### Analysis Document Structure
```markdown
# Subject Matter Expert Analysis: [Topic from Framework]

## Framework Reference
**Topic Framework**: @../guidance-specification.md
**Role Focus**: Domain Expertise & Technical Standards perspective

## Discussion Points Analysis
[Address each point from guidance-specification.md with subject matter expertise]

### Core Requirements (from framework)
[Domain-specific requirements and industry standards perspective]

### Technical Considerations (from framework)
[Deep technical analysis, architectural patterns, and best practices]

### User Experience Factors (from framework)
[Domain-specific usability standards and industry conventions]

### Implementation Challenges (from framework)
[Technical risks, scalability concerns, and maintenance implications]

### Success Metrics (from framework)
[Domain-specific KPIs, compliance metrics, and quality standards]

## Subject Matter Expert Specific Recommendations
[Role-specific technical expertise and industry best practices]

---
*Generated by subject-matter-expert analysis addressing structured framework*
```

## 🔄 **Session Integration**

### Completion Status Update
```json
{
  "subject_matter_expert": {
    "status": "completed",
    "framework_addressed": true,
    "output_location": ".workflow/WFS-{session}/.brainstorming/subject-matter-expert/analysis.md",
    "framework_reference": "@../guidance-specification.md"
  }
}
```

### Integration Points
- **Framework Reference**: @../guidance-specification.md for structured discussion points
- **Cross-Role Synthesis**: Domain expertise insights available for synthesis-report.md integration
- **Agent Autonomy**: Independent execution with framework guidance

---
name: synthesis
description: Clarify and refine role analyses through intelligent Q&A and targeted updates with synthesis agent
argument-hint: "[-y|--yes] [optional: --session session-id]"
allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*), Edit(*), Glob(*), AskUserQuestion(*)
---

## Auto Mode

When `--yes` or `-y` is passed: auto-select all enhancements, skip clarification questions, and use default answers.
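
If the command were driven by raw CLI arguments, flag detection might look like this (a sketch only; the actual command runner interprets `argument-hint` itself):

```javascript
// Illustrative parsing of the auto-mode flag and an explicit session ID.
const args = process.argv.slice(2);
const autoMode = args.includes('--yes') || args.includes('-y');
const sessionFlag = args.indexOf('--session');
const sessionId = sessionFlag !== -1 ? args[sessionFlag + 1] : null;
```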

## Overview

Six-phase workflow to eliminate ambiguities and enhance conceptual depth in role analyses:

**Phase 1-2**: Session detection → File discovery → Path preparation
**Phase 3A**: Cross-role analysis agent → Generate recommendations
**Phase 4**: User selects enhancements → User answers clarifications (via AskUserQuestion)
**Phase 5**: Parallel update agents (one per role)
**Phase 6**: Context package update → Metadata update → Completion report

**Key Features**:
- Multi-agent architecture (analysis agent + parallel update agents)
- Clear separation: agent analysis vs main-flow interaction
- Parallel document updates (one agent per role)
- User intent alignment validation

All user interactions use the AskUserQuestion tool (max 4 questions per call, multi-round).

**Document Flow**:
- Input: `[role]/analysis*.md`, `guidance-specification.md`, session metadata
- Output: Updated `[role]/analysis*.md` with Enhancements + Clarifications sections

---

## Quick Reference

### Phase Summary

| Phase | Goal | Executor | Output |
|-------|------|----------|--------|
| 1 | Session detection | Main flow | session_id, brainstorm_dir |
| 2 | File discovery | Main flow | role_analysis_paths |
| 3A | Cross-role analysis | Agent | enhancement_recommendations |
| 4 | User interaction | Main flow + AskUserQuestion | update_plan |
| 5 | Document updates | Parallel agents | Updated analysis*.md |
| 6 | Finalization | Main flow | context-package.json, report |

### AskUserQuestion Pattern

```javascript
// Enhancement selection (multi-select)
AskUserQuestion({
  questions: [{
    question: "请选择要应用的改进建议",
    header: "改进选择",
    multiSelect: true,
    options: [
      { label: "EP-001: API Contract", description: "添加详细的请求/响应 schema 定义" },
      { label: "EP-002: User Intent", description: "明确用户需求优先级和验收标准" }
    ]
  }]
})

// Clarification questions (single-select, multi-round)
AskUserQuestion({
  questions: [
    {
      question: "MVP 阶段的核心目标是什么?",
      header: "用户意图",
      multiSelect: false,
      options: [
        { label: "快速验证", description: "最小功能集,快速上线获取反馈" },
        { label: "技术壁垒", description: "完善架构,为长期发展打基础" },
        { label: "功能完整", description: "覆盖所有规划功能,延迟上线" }
      ]
    }
  ]
})
```

---

## Task Tracking

```json
[
  {"content": "Detect session and validate analyses", "status": "pending", "activeForm": "Detecting session"},
  {"content": "Discover role analysis file paths", "status": "pending", "activeForm": "Discovering paths"},
  {"content": "Execute analysis agent (cross-role analysis)", "status": "pending", "activeForm": "Executing analysis"},
  {"content": "Present enhancements via AskUserQuestion", "status": "pending", "activeForm": "Selecting enhancements"},
  {"content": "Clarification questions via AskUserQuestion", "status": "pending", "activeForm": "Clarifying"},
  {"content": "Execute parallel update agents", "status": "pending", "activeForm": "Updating documents"},
  {"content": "Update context package and metadata", "status": "pending", "activeForm": "Finalizing"}
]
```

---

## Execution Phases

### Phase 1: Discovery & Validation

1. **Detect Session**: Use the `--session` parameter or find `.workflow/active/WFS-*`
2. **Validate Files**:
   - `guidance-specification.md` (optional, warn if missing)
   - `*/analysis*.md` (required, error if empty)
3. **Load User Intent**: Extract from `workflow-session.json`
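
A minimal sketch of the detection step (assuming a Node-style runtime; the helper name is illustrative, the directory layout follows the conventions above):

```javascript
// Resolve the session ID from --session or the first .workflow/active/WFS-* directory.
const fs = require('fs');

function detectSession(explicitId) {
  if (explicitId) return explicitId;
  if (!fs.existsSync('.workflow/active')) {
    throw new Error('No active workflow session found');
  }
  const candidates = fs.readdirSync('.workflow/active')
    .filter(name => name.startsWith('WFS-'));
  if (candidates.length === 0) {
    throw new Error('No active workflow session found');
  }
  return candidates[0].replace(/^WFS-/, '');
}
```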

### Phase 2: Role Discovery & Path Preparation

**Main flow prepares file paths for the agent**:

1. **Discover Analysis Files**:
   - Glob: `.workflow/active/WFS-{session}/.brainstorming/*/analysis*.md`
   - Supports: analysis.md + analysis-{slug}.md (max 5)

2. **Extract Role Information**:
   - `role_analysis_paths`: Relative paths
   - `participating_roles`: Role names from directories

3. **Pass to Agent**: session_id, brainstorm_dir, role_analysis_paths, participating_roles
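
For illustration, the role names and relative paths might be derived from the glob results like this (the sample paths are hypothetical):

```javascript
// Derive participating_roles and role_analysis_paths from discovered files.
const paths = [
  '.workflow/active/WFS-demo/.brainstorming/product-manager/analysis.md',
  '.workflow/active/WFS-demo/.brainstorming/system-architect/analysis-core.md'
];
const participating_roles = [...new Set(paths.map(p => p.split('/').at(-2)))];
const role_analysis_paths = paths.map(p => p.split('/.brainstorming/')[1]);
// participating_roles → ["product-manager", "system-architect"]
// role_analysis_paths → ["product-manager/analysis.md", "system-architect/analysis-core.md"]
```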

### Phase 3A: Analysis & Enhancement Agent

**Agent executes cross-role analysis**:

```javascript
Task(conceptual-planning-agent, `
## Agent Mission
Analyze role documents, identify conflicts/gaps, generate enhancement recommendations

## Input
- brainstorm_dir: ${brainstorm_dir}
- role_analysis_paths: ${role_analysis_paths}
- participating_roles: ${participating_roles}

## Flow Control Steps
1. load_session_metadata → Read workflow-session.json
2. load_role_analyses → Read all analysis files
3. cross_role_analysis → Identify consensus, conflicts, gaps, ambiguities
4. generate_recommendations → Format as EP-001, EP-002, ...

## Output Format
[
  {
    "id": "EP-001",
    "title": "API Contract Specification",
    "affected_roles": ["system-architect", "api-designer"],
    "category": "Architecture",
    "current_state": "High-level API descriptions",
    "enhancement": "Add detailed contract definitions",
    "rationale": "Enables precise implementation",
    "priority": "High"
  }
]
`)
```

### Phase 4: User Interaction

**All interactions use AskUserQuestion (questions are asked in Chinese)**

#### Step 1: Enhancement Selection

```javascript
// If enhancements > 4, split into multiple rounds
const enhancements = [...]; // from Phase 3A
const BATCH_SIZE = 4;

for (let i = 0; i < enhancements.length; i += BATCH_SIZE) {
  const batch = enhancements.slice(i, i + BATCH_SIZE);

  AskUserQuestion({
    questions: [{
      question: `请选择要应用的改进建议 (第${Math.floor(i / BATCH_SIZE) + 1}轮)`,
      header: "改进选择",
      multiSelect: true,
      options: batch.map(ep => ({
        label: `${ep.id}: ${ep.title}`,
        description: `影响: ${ep.affected_roles.join(', ')} | ${ep.enhancement}`
      }))
    }]
  })

  // Store selections before next round
}

// The user can also skip: provide a "跳过" (skip) option
```

#### Step 2: Clarification Questions

```javascript
// Generate questions based on the 9-category taxonomy scan
// Categories: User Intent, Requirements, Architecture, UX, Feasibility, Risk, Process, Decisions, Terminology

const clarifications = [...]; // from analysis
const BATCH_SIZE = 4;

for (let i = 0; i < clarifications.length; i += BATCH_SIZE) {
  const batch = clarifications.slice(i, i + BATCH_SIZE);
  const currentRound = Math.floor(i / BATCH_SIZE) + 1;
  const totalRounds = Math.ceil(clarifications.length / BATCH_SIZE);

  AskUserQuestion({
    questions: batch.map(q => ({
      question: q.question,
      header: q.category.substring(0, 12),
      multiSelect: false,
      options: q.options.map(opt => ({
        label: opt.label,
        description: opt.description
      }))
    }))
  })

  // Store answers before next round
}
```

### Question Guidelines

**Target**: Developers (technically literate, but questions should start from user needs)

**Question Structure**: `[cross-role analysis finding] + [decision point needing clarification]`
**Option Structure**: `Label: [concrete option] + Description: [business impact] + [technical trade-off]`

**9-Category Taxonomy**:

| Category | Focus | Example Question Pattern |
|----------|-------|--------------------------|
| User Intent | User goals | "MVP阶段核心目标?" + 验证/壁垒/完整性 |
| Requirements | Requirement refinement | "功能优先级如何排序?" + 核心/增强/可选 |
| Architecture | Architecture decisions | "技术栈选择考量?" + 熟悉度/先进性/成熟度 |
| UX | User experience | "交互复杂度取舍?" + 简洁/丰富/渐进 |
| Feasibility | Feasibility | "资源约束下的范围?" + 最小/标准/完整 |
| Risk | Risk management | "风险容忍度?" + 保守/平衡/激进 |
| Process | Process cadence | "迭代节奏?" + 快速/稳定/灵活 |
| Decisions | Decision confirmation | "冲突解决方案?" + 方案A/方案B/折中 |
| Terminology | Terminology alignment | "统一使用哪个术语?" + 术语A/术语B |
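
For reference, a single clarification item feeding the Step 2 loop might look like this (the object shape is illustrative; the Chinese strings are the literal user-facing text, reusing the Architecture example from this command's earlier revisions):

```javascript
// Example clarification object consumed by the Step 2 batching loop.
const exampleClarification = {
  category: "Architecture",
  question: "技术栈选择的优先考虑因素?",
  options: [
    { label: "团队熟悉度", description: "使用现有技术栈,降低学习成本" },
    { label: "技术先进性", description: "采用新技术,提升竞争力" },
    { label: "生态成熟度", description: "选择成熟方案,保证稳定性" }
  ]
};
```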

**Quality Rules**:

**MUST Include**:
- ✅ All questions in Chinese (用中文提问)
- ✅ Grounded in specific findings from the cross-role analysis
- ✅ Options include a business-impact description
- ✅ Each question resolves a real ambiguity or conflict

**MUST Avoid**:
- ❌ Generic questions unrelated to the role analyses
- ❌ Repeating content already confirmed in the artifacts phase
- ❌ Overly detailed, implementation-level questions

#### Step 3: Build Update Plan

```javascript
update_plan = {
  "role1": {
    "enhancements": ["EP-001", "EP-003"],
    "clarifications": [
      {"question": "...", "answer": "...", "category": "..."},
      {"question": "...", "answer": "...", "category": "..."}
    ]
  },
  "role2": {
    "enhancements": ["EP-002"],
    "clarifications": [...]
  }
}
```
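
One way the plan could be assembled from the user's answers (a sketch; the variable names and the routing of every confirmed answer to every planned role are assumptions):

```javascript
// Group selected enhancements by affected role, then attach confirmed answers.
const update_plan = {};
for (const ep of selected_enhancements) {
  for (const role of ep.affected_roles) {
    update_plan[role] ??= { enhancements: [], clarifications: [] };
    update_plan[role].enhancements.push(ep.id);
  }
}
for (const c of answered_clarifications) {
  for (const role of Object.keys(update_plan)) {
    update_plan[role].clarifications.push({
      question: c.question, answer: c.answer, category: c.category
    });
  }
}
```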

### Phase 5: Parallel Document Update Agents

**Execute in parallel** (one agent per role):

```javascript
// Single message with multiple Task calls for parallelism
Task(conceptual-planning-agent, `
## Agent Mission
Apply enhancements and clarifications to ${role} analysis

## Input
- role: ${role}
- analysis_path: ${brainstorm_dir}/${role}/analysis.md
- enhancements: ${role_enhancements}
- clarifications: ${role_clarifications}
- original_user_intent: ${intent}

## Flow Control Steps
1. load_current_analysis → Read analysis file
2. add_clarifications_section → Insert Q&A section
3. apply_enhancements → Integrate into relevant sections
4. resolve_contradictions → Remove conflicts
5. enforce_terminology → Align terminology
6. validate_intent → Verify alignment with user intent
7. write_updated_file → Save changes

## Output
Updated ${role}/analysis.md
`)
```
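
The dispatch itself might look like this (a sketch; `buildUpdatePrompt` is a hypothetical helper that fills the template above for one role):

```javascript
// One Task call per role, emitted in a single message so agents run in parallel.
for (const [role, plan] of Object.entries(update_plan)) {
  Task('conceptual-planning-agent', buildUpdatePrompt(role, plan));
}
```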

**Agent Characteristics**:
- **Intent**: Integrate user-confirmed synthesis results (NOT generate new analysis)
- **Isolation**: Each agent updates exactly ONE role (parallel safe)
- **Dependencies**: Zero cross-agent dependencies
- **Validation**: All updates must align with original_user_intent

### Phase 6: Finalization

#### Step 1: Update Context Package

```javascript
// Sync updated analyses to context-package.json
const context_pkg = Read(".workflow/active/WFS-{session}/.process/context-package.json")

// Update guidance-specification if it exists
// Update synthesis-specification if it exists
// Re-read all role analysis files
// Update metadata timestamps

Write(context_pkg_path, JSON.stringify(context_pkg))
```

#### Step 2: Update Session Metadata

1. Wait for all parallel agents to complete
2. Update workflow-session.json:

```json
{
  "phases": {
    "BRAINSTORM": {
      "status": "clarification_completed",
      "clarification_completed": true,
      "completed_at": "timestamp",
      "participating_roles": [...],
      "clarification_results": {
        "enhancements_applied": ["EP-001", "EP-002"],
        "questions_asked": 3,
        "categories_clarified": ["Architecture", "UX"],
        "roles_updated": ["role1", "role2"]
      },
      "quality_metrics": {
        "user_intent_alignment": "validated",
        "requirement_coverage": "comprehensive",
        "ambiguity_resolution": "complete",
        "terminology_consistency": "enforced"
      }
    }
  }
}
```

#### Step 3: Completion Report

```markdown
## ✅ Clarification Complete

✅ PROCEED: `/workflow:plan --session WFS-{session-id}`
```

---

## Output

**Location**: `.workflow/active/WFS-{session}/.brainstorming/[role]/analysis*.md`

**Updated Structure**:
```markdown
- Ambiguities resolved, placeholders removed
- Consistent terminology
```

---

## Quality Checklist

**Content**:
- ✅ All role analyses loaded/analyzed
- ✅ Cross-role analysis (consensus, conflicts, gaps)
- ✅ 9-category ambiguity scan
- ✅ Questions prioritized

**Analysis**:
- ✅ User intent validated
- ✅ Cross-role synthesis complete
- ✅ Ambiguities resolved
- ✅ Terminology consistent

**Documents**:
- ✅ Clarifications section formatted
- ✅ Sections reflect answers
- ✅ No placeholders (TODO/TBD)
- ✅ Valid Markdown

---
name: system-architect
description: Generate or update system-architect/analysis.md addressing guidance-specification discussion points for system architecture perspective
argument-hint: "optional topic - uses existing framework if available"
allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*)
---

## 🏗️ **System Architect Analysis Generator**

### Purpose
**Specialized command for generating system-architect/analysis.md** that addresses guidance-specification.md discussion points from a system architecture perspective. Creates or updates the role-specific analysis with framework references.

### Core Function
- **Framework-based Analysis**: Address each discussion point in guidance-specification.md
- **Architecture Focus**: Technical architecture, scalability, and system design perspective
- **Update Mechanism**: Create new or update existing analysis.md
- **Agent Delegation**: Use conceptual-planning-agent for analysis generation

### Analysis Scope
- **Technical Architecture**: Scalable and maintainable system design
- **Technology Selection**: Stack evaluation and architectural decisions
- **Performance & Scalability**: Capacity planning and optimization strategies
- **Integration Patterns**: System communication and data flow design

### Role Boundaries & Responsibilities

#### **What This Role OWNS (Macro-Architecture)**
- **System-Level Architecture**: Service boundaries, deployment topology, and system composition
- **Cross-Service Communication Patterns**: Choosing between microservices/monolithic, event-driven/request-response, and sync/async patterns
- **Technology Stack Decisions**: Language, framework, database, and infrastructure choices
- **Non-Functional Requirements**: Scalability, performance, availability, disaster recovery, and monitoring strategies
- **Integration Planning**: How systems and services connect at the macro level (not specific API contracts)

#### **What This Role DOES NOT Own (Defers to Other Roles)**
- **API Contract Details**: Specific endpoint definitions, URL structures, HTTP methods → Defers to **API Designer**
- **Data Schemas**: Detailed data model design and entity relationships → Defers to **Data Architect**
- **UI/UX Design**: Interface design and user experience → Defers to **UX Expert** and **UI Designer**

#### **Handoff Points**
- **TO API Designer**: Provides architectural constraints (REST vs GraphQL, sync vs async) that define the API design space
- **TO Data Architect**: Provides system-level data flow requirements and integration patterns
- **FROM Data Architect**: Receives the canonical data model to inform system integration design

## ⚙️ **Execution Protocol**

### Phase 1: Session & Framework Detection
```bash
# Check active session and framework
CHECK: .workflow/.active-* marker files
IF active_session EXISTS:
    session_id = get_active_session()
    brainstorm_dir = .workflow/WFS-{session}/.brainstorming/

    CHECK: brainstorm_dir/guidance-specification.md
    IF EXISTS:
        framework_mode = true
        load_framework = true
    ELSE:
        IF topic_provided:
            framework_mode = false  # Create analysis without framework
        ELSE:
            ERROR: "No framework found and no topic provided"
```

### Phase 2: Analysis Mode Detection
```bash
# Check existing analysis
CHECK: brainstorm_dir/system-architect/analysis.md
IF EXISTS:
    SHOW existing analysis summary
    ASK: "Analysis exists. Do you want to:"
    OPTIONS:
        1. "Update with new insights" → Update existing
        2. "Replace completely"      → Generate new
        3. "Cancel"                  → Exit without changes
ELSE:
    CREATE new analysis
```

### Phase 3: Agent Task Generation

**Framework-Based Analysis** (when guidance-specification.md exists):
```bash
Task(subagent_type="conceptual-planning-agent",
     prompt="Generate system architect analysis addressing topic framework

## Framework Integration Required
**MANDATORY**: Load and address guidance-specification.md discussion points
**Framework Reference**: @{session.brainstorm_dir}/guidance-specification.md
**Output Location**: {session.brainstorm_dir}/system-architect/analysis.md

## Analysis Requirements
1. **Load Topic Framework**: Read guidance-specification.md completely
2. **Address Each Discussion Point**: Respond to all 5 framework sections from a system architecture perspective
3. **Include Framework Reference**: Start analysis.md with @../guidance-specification.md
4. **Technical Focus**: Emphasize scalability, architecture patterns, and technology decisions
5. **Structured Response**: Use the framework structure to organize the analysis

## Framework Sections to Address
- Core Requirements (from architecture perspective)
- Technical Considerations (detailed architectural analysis)
- User Experience Factors (technical UX considerations)
- Implementation Challenges (architecture risks and solutions)
- Success Metrics (technical metrics and monitoring)

## Output Structure Required
```markdown
# System Architect Analysis: [Topic]

**Framework Reference**: @../guidance-specification.md
**Role Focus**: System Architecture and Technical Design

## Core Requirements Analysis
[Address framework requirements from architecture perspective]

## Technical Considerations
[Detailed architectural analysis]

## User Experience Factors
[Technical aspects of UX implementation]

## Implementation Challenges
[Architecture risks and mitigation strategies]

## Success Metrics
[Technical metrics and system monitoring]

## Architecture-Specific Recommendations
[Detailed technical recommendations]
```",
     description="Generate system architect framework-based analysis")
```

### Phase 4: Update Mechanism

**Analysis Update Process**:
```bash
# For existing analysis updates
IF update_mode == "incremental":
    Task(subagent_type="conceptual-planning-agent",
         prompt="Update existing system architect analysis

## Current Analysis Context
**Existing Analysis**: @{session.brainstorm_dir}/system-architect/analysis.md
**Framework Reference**: @{session.brainstorm_dir}/guidance-specification.md

## Update Requirements
1. **Preserve Structure**: Maintain the existing analysis structure
2. **Add New Insights**: Integrate new technical insights and recommendations
3. **Framework Alignment**: Ensure continued alignment with the topic framework
4. **Technical Updates**: Add new architecture patterns and technology considerations
5. **Maintain References**: Keep the @../guidance-specification.md reference

## Update Instructions
- Read the existing analysis completely
- Identify areas for enhancement or new insights
- Add technical depth while preserving the original structure
- Update recommendations with new architectural approaches
- Keep addressing the framework discussion points",
         description="Update system architect analysis incrementally")
```

## Document Structure

### Output Files
```
.workflow/WFS-[topic]/.brainstorming/
├── guidance-specification.md    # Input: Framework (if exists)
└── system-architect/
    └── analysis.md              # ★ OUTPUT: Framework-based analysis
```

### Analysis Structure
**Required Elements**:
- **Framework Reference**: @../guidance-specification.md (if framework exists)
- **Role Focus**: System Architecture and Technical Design perspective
- **5 Framework Sections**: Address each framework discussion point
- **Technical Recommendations**: Architecture-specific insights and solutions
- How should we design APIs and manage versioning?

**4. Performance and Scalability**
- Where are the current system performance bottlenecks?
- How should we handle traffic growth and scaling demands?
- What database scaling and optimization strategies are needed?

## ⚡ **Two-Step Execution Flow**

### ⚠️ Session Management - FIRST STEP
Session detection and selection:
```bash
# Check for active sessions and branch on how many exist
active_sessions=$(find .workflow -name ".active-*" 2>/dev/null)
session_count=$(printf '%s\n' "$active_sessions" | grep -c .)
if [ "$session_count" -gt 1 ]; then
    prompt_user_to_select_session
else
    use_existing_or_create_new
fi
```

### Step 1: Context Gathering Phase
**System Architect Perspective Questioning**

Before agent assignment, gather comprehensive system architecture context:

#### 📋 Role-Specific Questions
1. **Scale & Performance Requirements**
   - Expected user load and traffic patterns?
   - Performance requirements (latency, throughput)?
   - Data volume and growth projections?

2. **Technical Constraints & Environment**
   - Existing technology stack and constraints?
   - Integration requirements with external systems?
   - Infrastructure and deployment environment?

3. **Architecture Complexity & Patterns**
   - Microservices vs monolithic considerations?
   - Data consistency and transaction requirements?
   - Event-driven vs request-response patterns?

4. **Non-Functional Requirements**
   - High availability and disaster recovery needs?
   - Security and compliance requirements?
   - Monitoring and observability expectations?

#### Context Validation
- **Minimum Response**: Each answer must be ≥50 characters
- **Re-prompting**: Insufficient detail triggers follow-up questions
- **Context Storage**: Save responses to `.brainstorming/system-architect-context.md`
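
A sketch of the ≥50-character rule (the helper name and follow-up text are illustrative):

```javascript
// Validate one context answer; short answers trigger a re-prompt.
function validateResponse(answer) {
  if (!answer || answer.trim().length < 50) {
    return { ok: false, followUp: 'Please elaborate (at least 50 characters).' };
  }
  return { ok: true };
}
```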

### Step 2: Agent Assignment with Flow Control
**Dedicated Agent Execution**

```bash
Task(conceptual-planning-agent): "
[FLOW_CONTROL]

Execute dedicated system-architect conceptual analysis for: {topic}

ASSIGNED_ROLE: system-architect
OUTPUT_LOCATION: .brainstorming/system-architect/
USER_CONTEXT: {validated_responses_from_context_gathering}

Flow Control Steps:
[
  {
    \"step\": \"load_role_template\",
    \"action\": \"Load system-architect planning template\",
    \"command\": \"bash($(cat ~/.claude/workflows/cli-templates/planning-roles/system-architect.md))\",
    \"output_to\": \"role_template\"
  }
]

Conceptual Analysis Requirements:
- Apply system-architect perspective to topic analysis
- Focus on architectural patterns, scalability, and integration points
- Use loaded role template framework for analysis structure
- Generate role-specific deliverables in designated output location
- Address all user context from questioning phase

Deliverables:
- analysis.md: Main system architecture analysis
- recommendations.md: Architecture recommendations
- deliverables/: Architecture-specific outputs as defined in role template

Embody system-architect role expertise for comprehensive conceptual planning."
```

### Progress Tracking
TodoWrite tracking for the two-step process:
```json
[
  {"content": "Gather system architect context through role-specific questioning", "status": "in_progress", "activeForm": "Gathering context"},
  {"content": "Validate context responses and save to system-architect-context.md", "status": "pending", "activeForm": "Validating context"},
  {"content": "Load system-architect planning template via flow control", "status": "pending", "activeForm": "Loading template"},
  {"content": "Execute dedicated conceptual-planning-agent for system-architect role", "status": "pending", "activeForm": "Executing agent"}
]
```

## 📊 **Output Specification**

### Output Location
```
.workflow/WFS-{topic-slug}/.brainstorming/system-architect/
├── analysis.md              # Primary architecture analysis
├── architecture-design.md   # Detailed system design and diagrams
├── technology-stack.md      # Technology stack recommendations and justifications
└── integration-plan.md      # System integration and API strategies
```

### Document Templates

#### analysis.md Structure
```markdown
# System Architecture Analysis: {Topic}
*Generated: {timestamp}*

## Executive Summary
[Key architectural findings and recommendations overview]

## Current State Assessment
### Existing Architecture Overview
### Technical Stack Analysis
### Performance Bottlenecks
### Technical Debt Assessment

## Requirements Analysis
### Functional Requirements
### Non-Functional Requirements
- Performance: [Response time, throughput requirements]
- Scalability: [User growth, data volume expectations]
- Availability: [Uptime requirements]
- Security: [Security requirements]

## Proposed Architecture
### High-Level Architecture Design
### Component Breakdown
### Data Flow Diagrams
### Technology Stack Recommendations

## Implementation Strategy
### Migration Planning
### Risk Mitigation
### Performance Optimization
### Security Considerations

## Scalability and Maintenance
### Horizontal Scaling Strategy
### Monitoring and Observability
### Deployment Strategy
### Long-term Maintenance Plan
```

## 🔄 **Session Integration**

### Status Synchronization
Upon completion, update `workflow-session.json`:
```json
{
  "phases": {
    "BRAINSTORM": {
      "system_architect": {
        "status": "completed",
        "completed_at": "timestamp",
        "output_directory": ".workflow/WFS-{topic}/.brainstorming/system-architect/",
        "key_insights": ["scalability_bottleneck", "architecture_pattern", "technology_recommendation"]
      }
    }
  }
}
```

### Cross-Role Collaboration
The system architect perspective provides:
- **Technical Constraints and Possibilities** → Product Manager
- **Architecture Requirements and Limitations** → UI Designer
- **Data Architecture Requirements** → Data Architect
- **Security Architecture Framework** → Security Expert
- **Technical Implementation Framework** → Feature Planner

## ✅ **Quality Assurance**

### Required Analysis Elements
- [ ] Clear architecture diagrams and component designs
- [ ] Detailed technology stack evaluation and recommendations
- [ ] Scalability and performance analysis with metrics
- [ ] System integration and API design specifications
- [ ] Comprehensive risk assessment and mitigation strategies

### Architecture Design Principles
- [ ] **Scalability**: System can handle growth in users and data
- [ ] **Maintainability**: Clear code structure, easy to modify and extend
- [ ] **Reliability**: Built-in fault tolerance and recovery mechanisms
- [ ] **Security**: Integrated security controls and protection measures
- [ ] **Performance**: Meets response time and throughput requirements

### Technical Decision Validation
- [ ] Technology choices have thorough justification and comparison analysis
- [ ] Architectural patterns align with business requirements and constraints
- [ ] Integration solutions consider compatibility and maintenance costs
- [ ] Deployment strategies are feasible with acceptable risk levels
- [ ] Monitoring and operations strategies are comprehensive and actionable

### Implementation Readiness
- [ ] **Technical Feasibility**: All proposed solutions are technically achievable
- [ ] **Resource Planning**: Resource requirements clearly defined and realistic
- [ ] **Risk Management**: Technical risks identified with mitigation plans
- [ ] **Performance Validation**: Architecture can meet performance requirements
- [ ] **Evolution Strategy**: Design allows for future growth and changes

---
name: ui-designer
description: Generate or update ui-designer/analysis.md addressing guidance-specification discussion points for UI design perspective
argument-hint: "optional topic - uses existing framework if available"
allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*)
---

## 🎨 **UI Designer Analysis Generator**

### Purpose
**Specialized command for generating ui-designer/analysis.md** that addresses guidance-specification.md discussion points from a UI/UX design perspective. Creates or updates the role-specific analysis with framework references.

### Core Function
- **Framework-based Analysis**: Address each discussion point in guidance-specification.md
- **UI/UX Focus**: User experience, interface design, and accessibility perspective
- **Update Mechanism**: Create new or update existing analysis.md
- **Agent Delegation**: Use conceptual-planning-agent for analysis generation

### Analysis Scope
- **Visual Design**: Color palettes, typography, spacing, and visual hierarchy implementation
- **High-Fidelity Mockups**: Polished, pixel-perfect interface designs
- **Design System Implementation**: Component libraries, design tokens, and style guides
- **Micro-Interactions & Animations**: Transition effects, loading states, and interactive feedback
- **Responsive Design**: Layout adaptations for different screen sizes and devices

### Role Boundaries & Responsibilities

#### **What This Role OWNS (Concrete Visual Interface Implementation)**
- **Visual Design Language**: Colors, typography, iconography, spacing, and overall aesthetic
- **High-Fidelity Mockups**: Polished designs showing exactly how the interface will look
- **Design System Components**: Building and documenting reusable UI components (buttons, inputs, cards, etc.)
- **Design Tokens**: Defining variables for colors, spacing, and typography that can be used in code (see the sketch after this list)
- **Micro-Interactions**: Hover states, transitions, animations, and interactive feedback details
- **Responsive Layouts**: Adapting designs for mobile, tablet, and desktop breakpoints
|
||||
|
||||
#### **What This Role DOES NOT Own (Defers to Other Roles)**
|
||||
- **User Research & Personas**: User behavior analysis and needs assessment → Defers to **UX Expert**
|
||||
- **Information Architecture**: Content structure and navigation strategy → Defers to **UX Expert**
|
||||
- **Low-Fidelity Wireframes**: Structural layouts without visual design → Defers to **UX Expert**
|
||||
|
||||
#### **Handoff Points**
|
||||
- **FROM UX Expert**: Receives wireframes, user flows, and information architecture as the foundation for visual design
|
||||
- **TO Frontend Developers**: Provides design specifications, component libraries, and design tokens for implementation
|
||||
- **WITH API Designer**: Coordinates on data presentation and form validation feedback (visual aspects only)
|
||||
|
||||
## ⚙️ **Execution Protocol**
|
||||
|
||||
### Phase 1: Session & Framework Detection
|
||||
```bash
|
||||
# Check active session and framework
|
||||
CHECK: .workflow/.active-* marker files
|
||||
IF active_session EXISTS:
|
||||
session_id = get_active_session()
|
||||
brainstorm_dir = .workflow/WFS-{session}/.brainstorming/
|
||||
|
||||
CHECK: brainstorm_dir/guidance-specification.md
|
||||
IF EXISTS:
|
||||
framework_mode = true
|
||||
load_framework = true
|
||||
ELSE:
|
||||
IF topic_provided:
|
||||
framework_mode = false # Create analysis without framework
|
||||
ELSE:
|
||||
ERROR: "No framework found and no topic provided"
|
||||
```
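For reference, the same detection flow as a small JavaScript sketch (the marker-file naming and directory layout are assumptions inferred from the pseudocode above, not a fixed API):

```javascript
const fs = require('fs')
const path = require('path')

// Mirrors the Phase 1 pseudocode: find an active-session marker, then look
// for guidance-specification.md in that session's brainstorming folder.
function detectFramework(topicProvided) {
  const markers = fs.readdirSync('.workflow').filter(f => f.startsWith('.active-'))
  if (markers.length === 0) return { mode: 'none' }

  const sessionId = markers[0].replace('.active-', '')
  const brainstormDir = path.join('.workflow', `WFS-${sessionId}`, '.brainstorming')
  const frameworkPath = path.join(brainstormDir, 'guidance-specification.md')

  if (fs.existsSync(frameworkPath)) return { mode: 'framework', sessionId, frameworkPath }
  if (topicProvided) return { mode: 'standalone', sessionId }
  throw new Error('No framework found and no topic provided')
}
```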
### Phase 2: Analysis Mode Detection
```bash
# Determine execution mode
IF framework_mode == true:
    mode = "framework_based_analysis"
    topic_ref = load_framework_topic()
    discussion_points = extract_framework_points()
ELSE:
    mode = "standalone_analysis"
    topic_ref = provided_topic
    discussion_points = generate_basic_structure()
```

### Phase 3: Agent Execution with Flow Control
**Framework-Based Analysis Generation**

```bash
Task(conceptual-planning-agent): "
[FLOW_CONTROL]

Execute ui-designer analysis for existing topic framework

## Context Loading
ASSIGNED_ROLE: ui-designer
OUTPUT_LOCATION: .workflow/WFS-{session}/.brainstorming/ui-designer/
ANALYSIS_MODE: {framework_mode ? "framework_based" : "standalone"}

## Flow Control Steps
1. **load_topic_framework**
   - Action: Load structured topic discussion framework
   - Command: Read(.workflow/WFS-{session}/.brainstorming/guidance-specification.md)
   - Output: topic_framework_content

2. **load_role_template**
   - Action: Load ui-designer planning template
   - Command: bash($(cat ~/.claude/workflows/cli-templates/planning-roles/ui-designer.md))
   - Output: role_template_guidelines

3. **load_session_metadata**
   - Action: Load session metadata and existing context
   - Command: Read(.workflow/WFS-{session}/workflow-session.json)
   - Output: session_context

## Analysis Requirements
**Framework Reference**: Address all discussion points in guidance-specification.md from the UI/UX perspective
**Role Focus**: User experience design, interface optimization, accessibility compliance
**Structured Approach**: Create analysis.md addressing framework discussion points
**Template Integration**: Apply role template guidelines within the framework structure

## Expected Deliverables
1. **analysis.md**: Comprehensive UI/UX analysis addressing all framework discussion points
2. **Framework Reference**: Include @../guidance-specification.md reference in analysis

## Completion Criteria
- Address each discussion point from guidance-specification.md with UI/UX design expertise
- Provide actionable design recommendations and interface solutions
- Include accessibility considerations and WCAG compliance planning
- Reference framework document using @ notation for integration
"
```

## 📋 **TodoWrite Integration**

### Workflow Progress Tracking
```javascript
TodoWrite({
  todos: [
    {
      content: "Detect active session and locate topic framework",
      status: "in_progress",
      activeForm: "Detecting session and framework"
    },
    {
      content: "Load guidance-specification.md and session metadata for context",
      status: "pending",
      activeForm: "Loading framework and session context"
    },
    {
      content: "Execute ui-designer analysis using conceptual-planning-agent with FLOW_CONTROL",
      status: "pending",
      activeForm: "Executing ui-designer framework analysis"
    },
    {
      content: "Generate analysis.md addressing all framework discussion points",
      status: "pending",
      activeForm: "Generating structured ui-designer analysis"
    },
    {
      content: "Update workflow-session.json with ui-designer completion status",
      status: "pending",
      activeForm: "Updating session metadata"
    }
  ]
});
```

## 📊 **Output Structure**

### Framework-Based Analysis
```
.workflow/WFS-{session}/.brainstorming/ui-designer/
└── analysis.md    # Structured analysis addressing guidance-specification.md discussion points
```

### Analysis Document Structure
```markdown
# UI Designer Analysis: [Topic from Framework]

## Framework Reference
**Topic Framework**: @../guidance-specification.md
**Role Focus**: UI/UX Design perspective

## Discussion Points Analysis
[Address each point from guidance-specification.md with UI/UX expertise]

### Core Requirements (from framework)
[UI/UX perspective on requirements]

### Technical Considerations (from framework)
[Interface and design system considerations]

### User Experience Factors (from framework)
[Detailed UX analysis and recommendations]

### Implementation Challenges (from framework)
[Design implementation and accessibility considerations]

### Success Metrics (from framework)
[UX metrics and usability success criteria]

## UI/UX Specific Recommendations
[Role-specific design recommendations and solutions]

---
*Generated by ui-designer analysis addressing the structured framework*
```

## 🔄 **Session Integration**

### Completion Status Update
```json
{
  "ui_designer": {
    "status": "completed",
    "framework_addressed": true,
    "output_location": ".workflow/WFS-{session}/.brainstorming/ui-designer/analysis.md",
    "framework_reference": "@../guidance-specification.md"
  }
}
```

### Integration Points
- **Framework Reference**: @../guidance-specification.md for structured discussion points
- **Cross-Role Synthesis**: UI/UX insights available for synthesis-report.md integration
- **Agent Autonomy**: Independent execution with framework guidance
---
name: ux-expert
description: Generate or update ux-expert/analysis.md addressing guidance-specification discussion points for the UX perspective
argument-hint: "optional topic - uses existing framework if available"
allowed-tools: Task(conceptual-planning-agent), TodoWrite(*), Read(*), Write(*)
---

## 🎯 **UX Expert Analysis Generator**

### Purpose
**Specialized command for generating ux-expert/analysis.md** that addresses guidance-specification.md discussion points from the user experience perspective. Creates or updates the role-specific analysis with framework references.

### Core Function
- **Framework-based Analysis**: Address each discussion point in guidance-specification.md
- **UX Design Focus**: User research, interaction patterns, and usability optimization
- **Update Mechanism**: Create a new analysis.md or update the existing one
- **Agent Delegation**: Use conceptual-planning-agent for analysis generation

### Analysis Scope
- **User Research**: User personas, behavioral analysis, and needs assessment
- **Information Architecture**: Content structure, navigation hierarchy, and mental models
- **User Journey Mapping**: User flows, task analysis, and interaction models
- **Usability Strategy**: Accessibility planning, cognitive load reduction, and user testing frameworks
- **Wireframing**: Low-fidelity layouts and structural prototypes (not visual design)

### Role Boundaries & Responsibilities

#### **What This Role OWNS (Abstract User Experience & Research)**
- **User Research & Personas**: Understanding target users, their goals, pain points, and behaviors
- **Information Architecture**: Organizing content and defining navigation structures at a conceptual level
- **User Journey Mapping**: Defining user flows, task sequences, and interaction models
- **Wireframes & Low-Fidelity Prototypes**: Structural layouts showing information hierarchy (boxes and arrows, not colors/fonts)
- **Usability Testing Strategy**: Planning user testing, A/B tests, and validation methods
- **Accessibility Planning**: WCAG compliance strategy and inclusive design principles

#### **What This Role DOES NOT Own (Defers to Other Roles)**
- **Visual Design**: Colors, typography, spacing, visual style → Defers to **UI Designer**
- **High-Fidelity Mockups**: Polished, pixel-perfect designs → Defers to **UI Designer**
- **Component Implementation**: Design system components, CSS, animations → Defers to **UI Designer**

#### **Handoff Points**
- **TO UI Designer**: Provides wireframes, user flows, and information architecture that the UI Designer will transform into high-fidelity visual designs
- **FROM User Research**: May receive external research data to inform UX decisions
- **TO Product Owner**: Provides user insights and validation results to inform feature prioritization

## ⚙️ **Execution Protocol**

### Phase 1: Session & Framework Detection
```bash
# Check active session and framework
CHECK: .workflow/.active-* marker files
IF active_session EXISTS:
    session_id = get_active_session()
    brainstorm_dir = .workflow/WFS-{session}/.brainstorming/

    CHECK: brainstorm_dir/guidance-specification.md
    IF EXISTS:
        framework_mode = true
        load_framework = true
    ELSE:
        IF topic_provided:
            framework_mode = false  # Create analysis without framework
        ELSE:
            ERROR: "No framework found and no topic provided"
```

### Phase 2: Analysis Mode Detection
```bash
# Determine execution mode
IF framework_mode == true:
    mode = "framework_based_analysis"
    topic_ref = load_framework_topic()
    discussion_points = extract_framework_points()
ELSE:
    mode = "standalone_analysis"
    topic_ref = provided_topic
    discussion_points = generate_basic_structure()
```

### Phase 3: Agent Execution with Flow Control
**Framework-Based Analysis Generation**

```bash
Task(conceptual-planning-agent): "
[FLOW_CONTROL]

Execute ux-expert analysis for existing topic framework

## Context Loading
ASSIGNED_ROLE: ux-expert
OUTPUT_LOCATION: .workflow/WFS-{session}/.brainstorming/ux-expert/
ANALYSIS_MODE: {framework_mode ? "framework_based" : "standalone"}

## Flow Control Steps
1. **load_topic_framework**
   - Action: Load structured topic discussion framework
   - Command: Read(.workflow/WFS-{session}/.brainstorming/guidance-specification.md)
   - Output: topic_framework_content

2. **load_role_template**
   - Action: Load ux-expert planning template
   - Command: bash($(cat ~/.claude/workflows/cli-templates/planning-roles/ux-expert.md))
   - Output: role_template_guidelines

3. **load_session_metadata**
   - Action: Load session metadata and existing context
   - Command: Read(.workflow/WFS-{session}/workflow-session.json)
   - Output: session_context

## Analysis Requirements
**Framework Reference**: Address all discussion points in guidance-specification.md from the user experience perspective
**Role Focus**: User research, information architecture, interaction patterns, usability optimization
**Structured Approach**: Create analysis.md addressing framework discussion points
**Template Integration**: Apply role template guidelines within the framework structure

## Expected Deliverables
1. **analysis.md**: Comprehensive UX analysis addressing all framework discussion points
2. **Framework Reference**: Include @../guidance-specification.md reference in analysis

## Completion Criteria
- Address each discussion point from guidance-specification.md with UX design expertise
- Provide actionable interaction design and usability optimization strategies
- Include accessibility considerations and interaction pattern recommendations
- Reference framework document using @ notation for integration
"
```

## 📋 **TodoWrite Integration**

### Workflow Progress Tracking
```javascript
TodoWrite({
  todos: [
    {
      content: "Detect active session and locate topic framework",
      status: "in_progress",
      activeForm: "Detecting session and framework"
    },
    {
      content: "Load guidance-specification.md and session metadata for context",
      status: "pending",
      activeForm: "Loading framework and session context"
    },
    {
      content: "Execute ux-expert analysis using conceptual-planning-agent with FLOW_CONTROL",
      status: "pending",
      activeForm: "Executing ux-expert framework analysis"
    },
    {
      content: "Generate analysis.md addressing all framework discussion points",
      status: "pending",
      activeForm: "Generating structured ux-expert analysis"
    },
    {
      content: "Update workflow-session.json with ux-expert completion status",
      status: "pending",
      activeForm: "Updating session metadata"
    }
  ]
});
```

## 📊 **Output Structure**

### Framework-Based Analysis
```
.workflow/WFS-{session}/.brainstorming/ux-expert/
└── analysis.md    # Structured analysis addressing guidance-specification.md discussion points
```

### Analysis Document Structure
```markdown
# UX Expert Analysis: [Topic from Framework]

## Framework Reference
**Topic Framework**: @../guidance-specification.md
**Role Focus**: User Experience & Interaction Design perspective

## Discussion Points Analysis
[Address each point from guidance-specification.md with UX design expertise]

### Core Requirements (from framework)
[User needs and interaction design requirements perspective]

### Technical Considerations (from framework)
[Interaction feasibility and technical constraint considerations]

### User Experience Factors (from framework)
[Usability optimization, accessibility, and user-centered design analysis]

### Implementation Challenges (from framework)
[Design implementation challenges and progressive enhancement strategies]

### Success Metrics (from framework)
[UX metrics including usability testing, user satisfaction, and design KPIs]

## UX Expert Specific Recommendations
[Role-specific interaction design patterns and usability optimization strategies]

---
*Generated by ux-expert analysis addressing the structured framework*
```

## 🔄 **Session Integration**

### Completion Status Update
```json
{
  "ux_expert": {
    "status": "completed",
    "framework_addressed": true,
    "output_location": ".workflow/WFS-{session}/.brainstorming/ux-expert/analysis.md",
    "framework_reference": "@../guidance-specification.md"
  }
}
```

### Integration Points
- **Framework Reference**: @../guidance-specification.md for structured discussion points
- **Cross-Role Synthesis**: UX design insights available for synthesis-report.md integration
- **Agent Autonomy**: Independent execution with framework guidance
New file: .claude/commands/workflow/clean.md (548 lines)
---
name: clean
description: Intelligent code cleanup with mainline detection, stale artifact discovery, and safe execution
argument-hint: "[-y|--yes] [--dry-run] [\"focus area\"]"
allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Glob(*), Bash(*), Write(*)
---

# Clean Command (/workflow:clean)

## Overview

Intelligent cleanup command that explores the codebase to identify the development mainline, discovers artifacts that have drifted from it, and safely removes stale sessions, abandoned documents, and dead code.

**Core capabilities:**
- Mainline detection: Identify active development branches and core modules
- Drift analysis: Find sessions, documents, and code that deviate from the mainline
- Intelligent discovery: cli-explore-agent-based artifact scanning
- Safe execution: Confirmation-based cleanup with dry-run preview

## Usage

```bash
/workflow:clean                    # Full intelligent cleanup (explore → analyze → confirm → execute)
/workflow:clean --yes              # Auto mode (use safe defaults, no confirmation)
/workflow:clean --dry-run          # Explore and analyze only, no execution
/workflow:clean -y "auth module"   # Auto mode with focus area
```

## Auto Mode Defaults

When the `--yes` or `-y` flag is used:
- **Categories to Clean**: Auto-selects `["Sessions"]` only (safest - only workflow sessions)
- **Risk Level**: Auto-selects `"Low only"` (only low-risk items)
- All confirmations are skipped; execution proceeds directly

**Flag Parsing**:
```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
const dryRun = $ARGUMENTS.includes('--dry-run')
```
## Execution Process

```
Phase 1: Mainline Detection
├─ Analyze git history for development trends
├─ Identify core modules (high commit frequency)
├─ Map active vs stale branches
└─ Build mainline profile

Phase 2: Drift Discovery (cli-explore-agent)
├─ Scan workflow sessions for orphaned artifacts
├─ Identify documents drifted from mainline
├─ Detect dead code and unused exports
└─ Generate cleanup manifest

Phase 3: Confirmation
├─ Display cleanup summary by category
├─ Show impact analysis (files, size, risk)
└─ AskUserQuestion: Select categories to clean

Phase 4: Execution (unless --dry-run)
├─ Execute cleanup by category
├─ Update manifests and indexes
└─ Report results
```

## Implementation

### Phase 1: Mainline Detection

**Session Setup**:
```javascript
// Shifts the epoch by +8h so the ISO string reflects UTC+8 wall-clock time
// (note: the trailing "Z" is then nominal rather than true UTC).
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()

const dateStr = getUtc8ISOString().substring(0, 10)
const sessionId = `clean-${dateStr}`
const sessionFolder = `.workflow/.clean/${sessionId}`

Bash(`mkdir -p ${sessionFolder}`)
```

**Step 1.1: Git History Analysis**
```bash
# Get commit frequency by directory (last 30 days)
bash(git log --since="30 days ago" --name-only --pretty=format: | grep -v "^$" | cut -d/ -f1-2 | sort | uniq -c | sort -rn | head -20)

# Get recently active branches
bash(git for-each-ref --sort=-committerdate refs/heads/ --format='%(refname:short) %(committerdate:relative)' | head -10)

# Get files with the most recent changes
bash(git log --since="7 days ago" --name-only --pretty=format: | grep -v "^$" | sort | uniq -c | sort -rn | head -30)
```

**Step 1.2: Build Mainline Profile**
```javascript
const mainlineProfile = {
  coreModules: [],     // High-frequency directories
  activeFiles: [],     // Recently modified files
  activeBranches: [],  // Branches with recent commits
  staleThreshold: {
    sessions: 7,       // Days
    branches: 30,
    documents: 14
  },
  timestamp: getUtc8ISOString()
}

// Parse git log output to identify core modules:
// modules with >5 commits in the last 30 days = core;
// modules with 0 commits in the last 30 days = potentially stale.

Write(`${sessionFolder}/mainline-profile.json`, JSON.stringify(mainlineProfile, null, 2))
```
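A minimal sketch of the parsing step the comments describe (the helper name is hypothetical; the input is the `sort | uniq -c | sort -rn` output from Step 1.1). Note that directories with zero commits never appear in `uniq -c` output, so staleness is found by diffing against the full directory list:

```javascript
// Parses lines like "  12 src/core" and classifies directories against the
// 30-day commit-frequency thresholds from the comments above.
function classifyModules(uniqCountOutput, allDirs) {
  const commitCounts = new Map()
  for (const line of uniqCountOutput.split('\n')) {
    const match = line.trim().match(/^(\d+)\s+(.+)$/)
    if (match) commitCounts.set(match[2], parseInt(match[1], 10))
  }
  const coreModules = [...commitCounts].filter(([, n]) => n > 5).map(([dir]) => dir)
  const staleCandidates = allDirs.filter(dir => !commitCounts.has(dir))
  return { coreModules, staleCandidates }
}
```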
---

### Phase 2: Drift Discovery

**Launch cli-explore-agent for intelligent artifact scanning**:

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description="Discover stale artifacts",
  prompt=`
## Task Objective
Discover artifacts that have drifted from the development mainline. Identify stale sessions, abandoned documents, and dead code for cleanup.

## Context
- **Session Folder**: ${sessionFolder}
- **Mainline Profile**: ${sessionFolder}/mainline-profile.json
- **Focus Area**: ${focusArea || "entire project"}

## Discovery Categories

### Category 1: Stale Workflow Sessions
Scan and analyze workflow session directories:

**Locations to scan**:
- .workflow/active/WFS-* (active sessions)
- .workflow/archives/WFS-* (archived sessions)
- .workflow/.lite-plan/* (lite-plan sessions)
- .workflow/.debug/DBG-* (debug sessions)

**Staleness criteria**:
- Active sessions: No modification >7 days + no related git commits
- Archives: >30 days old + no feature references in project-tech.json
- Lite-plan: >7 days old + plan.json not executed
- Debug: >3 days old + issue not in recent commits

**Analysis steps**:
1. List all session directories with modification times
2. Cross-reference with git log (are session topics in recent commits?)
3. Check manifest.json for orphan entries
4. Identify sessions with the .archiving marker (interrupted)

### Category 2: Drifted Documents
Scan documentation that no longer aligns with the code:

**Locations to scan**:
- .claude/rules/tech/* (generated tech rules)
- .workflow/.scratchpad/* (temporary notes)
- **/CLAUDE.md (module documentation)
- **/README.md (outdated descriptions)

**Drift criteria**:
- Tech rules: Referenced files no longer exist
- Scratchpad: Any file (always temporary)
- Module docs: Describe functions/classes that were removed
- READMEs: Reference deleted directories

**Analysis steps**:
1. Parse document content for file/function references
2. Verify referenced entities still exist in the codebase
3. Flag documents with >30% broken references

### Category 3: Dead Code
Identify code that is no longer used:

**Scan patterns**:
- Unused exports (exported but never imported)
- Orphan files (not imported anywhere)
- Commented-out code blocks (>10 lines)
- TODO/FIXME comments >90 days old

**Analysis steps**:
1. Build import graph using rg/grep
2. Identify exports with no importers
3. Find files not in the import graph
4. Scan for large comment blocks

## Output Format

Write to: ${sessionFolder}/cleanup-manifest.json

\`\`\`json
{
  "generated_at": "ISO timestamp",
  "mainline_summary": {
    "core_modules": ["src/core", "src/api"],
    "active_branches": ["main", "feature/auth"],
    "health_score": 0.85
  },
  "discoveries": {
    "stale_sessions": [
      {
        "path": ".workflow/active/WFS-old-feature",
        "type": "active",
        "age_days": 15,
        "reason": "No related commits in 15 days",
        "size_kb": 1024,
        "risk": "low"
      }
    ],
    "drifted_documents": [
      {
        "path": ".claude/rules/tech/deprecated-lib",
        "type": "tech_rules",
        "broken_references": 5,
        "total_references": 6,
        "drift_percentage": 83,
        "reason": "Referenced library removed",
        "risk": "low"
      }
    ],
    "dead_code": [
      {
        "path": "src/utils/legacy.ts",
        "type": "orphan_file",
        "reason": "Not imported by any file",
        "last_modified": "2025-10-01",
        "risk": "medium"
      }
    ]
  },
  "summary": {
    "total_items": 12,
    "total_size_mb": 45.2,
    "by_category": {
      "stale_sessions": 5,
      "drifted_documents": 4,
      "dead_code": 3
    },
    "by_risk": {
      "low": 8,
      "medium": 3,
      "high": 1
    }
  }
}
\`\`\`

## Execution Commands

\`\`\`bash
# Session directories
find .workflow -type d \\( -name "WFS-*" -o -name "DBG-*" \\) 2>/dev/null

# Check modification times (Linux; on macOS use: stat -f "%m %N")
stat -c "%Y %n" .workflow/active/WFS-* 2>/dev/null

# Check modification times (Windows PowerShell via bash)
powershell -Command "Get-ChildItem '.workflow/active/WFS-*' | ForEach-Object { Write-Output \"$($_.LastWriteTime) $($_.FullName)\" }"

# Find orphan exports (TypeScript)
rg "export (const|function|class|interface|type)" --type ts -l

# Find imports
rg "import.*from" --type ts

# Find large comment blocks
rg "^\\s*/\\*" -A 10 --type ts

# Find old TODOs
rg "TODO|FIXME" --type ts -n
\`\`\`

## Success Criteria
- [ ] All session directories scanned with age calculation
- [ ] Documents cross-referenced with existing code
- [ ] Dead code detection via import graph analysis
- [ ] cleanup-manifest.json written with complete data
- [ ] Each item has a risk level and cleanup reason
`
)
```
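For the age calculation the prompt asks for, a small sketch against the staleness thresholds above (the helper name and threshold table are illustrative; cross-referencing against git history is assumed to happen separately):

```javascript
const fs = require('fs')

// Per-type staleness thresholds in days, taken from the criteria above.
const STALE_DAYS = { active: 7, archive: 30, 'lite-plan': 7, debug: 3 }

// Computes a session directory's age from its mtime and flags staleness.
function sessionAge(dirPath, type) {
  const mtime = fs.statSync(dirPath).mtimeMs
  const ageDays = Math.floor((Date.now() - mtime) / 86_400_000)
  return { path: dirPath, age_days: ageDays, stale: ageDays > (STALE_DAYS[type] ?? 7) }
}
```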
---

### Phase 3: Confirmation

**Step 3.1: Display Summary**
```javascript
const manifest = JSON.parse(Read(`${sessionFolder}/cleanup-manifest.json`))

// getRiskSummary(category) is assumed to aggregate the risk levels of the
// discovered items in that category (e.g. "3 low / 1 medium").
console.log(`
## Cleanup Discovery Report

**Mainline Health**: ${Math.round(manifest.mainline_summary.health_score * 100)}%
**Core Modules**: ${manifest.mainline_summary.core_modules.join(', ')}

### Summary
| Category | Count | Size | Risk |
|----------|-------|------|------|
| Stale Sessions | ${manifest.summary.by_category.stale_sessions} | - | ${getRiskSummary('sessions')} |
| Drifted Documents | ${manifest.summary.by_category.drifted_documents} | - | ${getRiskSummary('documents')} |
| Dead Code | ${manifest.summary.by_category.dead_code} | - | ${getRiskSummary('code')} |

**Total**: ${manifest.summary.total_items} items, ~${manifest.summary.total_size_mb} MB

### Stale Sessions
${manifest.discoveries.stale_sessions.map(s =>
  `- ${s.path} (${s.age_days}d, ${s.risk}): ${s.reason}`
).join('\n')}

### Drifted Documents
${manifest.discoveries.drifted_documents.map(d =>
  `- ${d.path} (${d.drift_percentage}% broken, ${d.risk}): ${d.reason}`
).join('\n')}

### Dead Code
${manifest.discoveries.dead_code.map(c =>
  `- ${c.path} (${c.type}, ${c.risk}): ${c.reason}`
).join('\n')}
`)
```

**Step 3.2: Dry-Run Exit**
```javascript
// dryRun was parsed from $ARGUMENTS in Flag Parsing above
if (dryRun) {
  console.log(`
---
**Dry-run mode**: No changes made.
Manifest saved to: ${sessionFolder}/cleanup-manifest.json

To execute cleanup: /workflow:clean
`)
  return
}
```
**Step 3.3: User Confirmation**
```javascript
// autoYes was parsed from $ARGUMENTS in Flag Parsing above
let userSelection

if (autoYes) {
  // Auto mode: Use safe defaults
  console.log(`[--yes] Auto-selecting safe cleanup defaults:`)
  console.log(`  - Categories: Sessions only`)
  console.log(`  - Risk level: Low only`)

  userSelection = {
    categories: ["Sessions"],
    risk: "Low only"
  }
} else {
  // Interactive mode: Ask user
  userSelection = AskUserQuestion({
    questions: [
      {
        question: "Which categories to clean?",
        header: "Categories",
        multiSelect: true,
        options: [
          {
            label: "Sessions",
            description: `${manifest.summary.by_category.stale_sessions} stale workflow sessions`
          },
          {
            label: "Documents",
            description: `${manifest.summary.by_category.drifted_documents} drifted documents`
          },
          {
            label: "Dead Code",
            description: `${manifest.summary.by_category.dead_code} unused code files`
          }
        ]
      },
      {
        question: "Risk level to include?",
        header: "Risk",
        multiSelect: false,
        options: [
          { label: "Low only", description: "Safest - only obviously stale items" },
          { label: "Low + Medium", description: "Recommended - includes likely unused items" },
          { label: "All", description: "Aggressive - includes high-risk items" }
        ]
      }
    ]
  })
}
```
---

### Phase 4: Execution

**Step 4.1: Filter Items by Selection**
```javascript
const selectedCategories = userSelection.categories  // ['Sessions', 'Documents', ...]
const riskLevel = userSelection.risk                 // 'Low only', 'Low + Medium', 'All'

const riskFilter = {
  'Low only': ['low'],
  'Low + Medium': ['low', 'medium'],
  'All': ['low', 'medium', 'high']
}[riskLevel]

const itemsToClean = []

if (selectedCategories.includes('Sessions')) {
  itemsToClean.push(...manifest.discoveries.stale_sessions.filter(s => riskFilter.includes(s.risk)))
}
if (selectedCategories.includes('Documents')) {
  itemsToClean.push(...manifest.discoveries.drifted_documents.filter(d => riskFilter.includes(d.risk)))
}
if (selectedCategories.includes('Dead Code')) {
  itemsToClean.push(...manifest.discoveries.dead_code.filter(c => riskFilter.includes(c.risk)))
}

TodoWrite({
  todos: itemsToClean.map(item => ({
    content: `Clean: ${item.path}`,
    status: "pending",
    activeForm: `Cleaning ${item.path}`
  }))
})
```
**Step 4.2: Execute Cleanup**
```javascript
const results = { deleted: [], failed: [], skipped: [] }

for (const item of itemsToClean) {
  TodoWrite({ todos: [...] })  // Mark the current item as in_progress

  try {
    // Sessions, documents, and orphan files are all removed the same way.
    // A dead export inside a still-used file would need an Edit instead of rm.
    Bash({ command: `rm -rf "${item.path}"` })

    results.deleted.push(item.path)
    TodoWrite({ todos: [...] })  // Mark as completed
  } catch (error) {
    results.failed.push({ path: item.path, error: error.message })
  }
}
```
**Step 4.3: Update Manifests**
```javascript
// fileExists() is assumed to be a thin wrapper around fs.existsSync.

// Update the archives manifest if sessions were deleted
if (selectedCategories.includes('Sessions')) {
  const archiveManifestPath = '.workflow/archives/manifest.json'
  if (fileExists(archiveManifestPath)) {
    const archiveManifest = JSON.parse(Read(archiveManifestPath))
    const deletedSessionIds = results.deleted
      .filter(p => p.includes('WFS-'))
      .map(p => p.split('/').pop())

    const updatedManifest = archiveManifest.filter(entry =>
      !deletedSessionIds.includes(entry.session_id)
    )

    Write(archiveManifestPath, JSON.stringify(updatedManifest, null, 2))
  }
}

// Update project-tech.json if features referenced deleted sessions
const projectPath = '.workflow/project-tech.json'
if (fileExists(projectPath)) {
  const project = JSON.parse(Read(projectPath))
  const deletedPaths = new Set(results.deleted)

  project.features = project.features.filter(f =>
    !deletedPaths.has(f.traceability?.archive_path)
  )

  project.statistics.total_features = project.features.length
  project.statistics.last_updated = getUtc8ISOString()

  Write(projectPath, JSON.stringify(project, null, 2))
}
```
**Step 4.4: Report Results**
```javascript
console.log(`
## Cleanup Complete

**Deleted**: ${results.deleted.length} items
**Failed**: ${results.failed.length} items
**Skipped**: ${results.skipped.length} items

### Deleted Items
${results.deleted.map(p => `- ${p}`).join('\n')}

${results.failed.length > 0 ? `
### Failed Items
${results.failed.map(f => `- ${f.path}: ${f.error}`).join('\n')}
` : ''}

Cleanup manifest archived to: ${sessionFolder}/cleanup-manifest.json
`)
```

---

## Session Folder Structure

```
.workflow/.clean/clean-{YYYY-MM-DD}/
├── mainline-profile.json   # Git history analysis
└── cleanup-manifest.json   # Discovery results
```

## Risk Level Definitions

| Risk | Description | Examples |
|------|-------------|----------|
| **Low** | Safe to delete, no dependencies | Empty sessions, scratchpad files, 100% broken docs |
| **Medium** | Likely unused, verify before delete | Orphan files, old archives, partially broken docs |
| **High** | May have hidden dependencies | Files with some imports, recent modifications |

## Error Handling

| Situation | Action |
|-----------|--------|
| No git repository | Skip mainline detection, use file timestamps only |
| Session in use (.archiving) | Skip with warning |
| Permission denied | Report error, continue with others |
| Manifest parse error | Regenerate from filesystem scan |
| Empty discovery | Report "codebase is clean" |

## Related Commands

- `/workflow:session:complete` - Properly archive active sessions
- `/memory:compact` - Save session memory before cleanup
- `/workflow:status` - View current workflow state
New file: .claude/commands/workflow/debug-with-file.md (672 lines)
---
name: debug-with-file
description: Interactive hypothesis-driven debugging with documented exploration, understanding evolution, and Gemini-assisted correction
argument-hint: "[-y|--yes] \"bug description or error message\""
allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Grep(*), Glob(*), Bash(*), Edit(*), Write(*)
---

## Auto Mode

When `--yes` or `-y` is passed: auto-confirm all decisions (hypotheses, fixes, iteration) and use recommended settings.

# Workflow Debug-With-File Command (/workflow:debug-with-file)

## Overview

Enhanced evidence-based debugging with a **documented exploration process**. Records how understanding evolves, consolidates insights, and uses Gemini to correct misunderstandings.

**Core workflow**: Explore → Document → Log → Analyze → Correct Understanding → Fix → Verify

**Scope**: Adds temporary debug logging to observe program state and cleans up all instrumentation after resolution. Does NOT perform code injection, security testing, or modification of program behavior.

**Key enhancements over /workflow:debug**:
- **understanding.md**: Timeline of exploration and learning
- **Gemini-assisted correction**: Validates and corrects hypotheses
- **Consolidation**: Simplifies proven-wrong understanding to avoid clutter
- **Learning retention**: Preserves what was learned, even from failed attempts

## Usage

```bash
/workflow:debug-with-file <BUG_DESCRIPTION>

# Arguments
<bug-description>    Bug description, error message, or stack trace (required)
```

## Execution Process

```
Session Detection:
├─ Check if a debug session exists for this bug
├─ EXISTS + understanding.md exists → Continue mode
└─ NOT_FOUND → Explore mode

Explore Mode:
├─ Locate error source in codebase
├─ Document initial understanding in understanding.md
├─ Generate testable hypotheses with Gemini validation
├─ Add NDJSON debug logging statements
└─ Output: Hypothesis list + await user reproduction

Analyze Mode:
├─ Parse debug.log, validate each hypothesis
├─ Use Gemini to analyze evidence and correct understanding
├─ Update understanding.md with:
│  ├─ New evidence
│  ├─ Corrected misunderstandings (strikethrough + correction)
│  └─ Consolidated current understanding
└─ Decision:
   ├─ Confirmed → Fix root cause
   ├─ Inconclusive → Add more logging, iterate
   └─ All rejected → Gemini-assisted new hypotheses

Fix & Cleanup:
├─ Apply fix based on confirmed hypothesis
├─ User verifies
├─ Document final understanding + lessons learned
├─ Remove debug instrumentation
└─ If not fixed → Return to Analyze mode
```

## Implementation

### Session Setup & Mode Detection

```javascript
// Assumes: const fs = require('fs')
// Shifts the epoch by +8h so the ISO string reflects UTC+8 wall-clock time.
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()

const bugSlug = bug_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 30)
const dateStr = getUtc8ISOString().substring(0, 10)

const sessionId = `DBG-${bugSlug}-${dateStr}`
const sessionFolder = `.workflow/.debug/${sessionId}`
const debugLogPath = `${sessionFolder}/debug.log`
const understandingPath = `${sessionFolder}/understanding.md`
const hypothesesPath = `${sessionFolder}/hypotheses.json`

// Auto-detect mode
const sessionExists = fs.existsSync(sessionFolder)
const hasUnderstanding = sessionExists && fs.existsSync(understandingPath)
const logHasContent = sessionExists && fs.existsSync(debugLogPath) && fs.statSync(debugLogPath).size > 0

const mode = logHasContent ? 'analyze' : (hasUnderstanding ? 'continue' : 'explore')

if (!sessionExists) {
  bash(`mkdir -p ${sessionFolder}`)
}
```
---

### Explore Mode

**Step 1.1: Locate Error Source**

```javascript
// extractErrorKeywords and analyzeSearchResults are helpers assumed by this
// spec; a sketch of the former is given below.
const keywords = extractErrorKeywords(bug_description)

// Search the codebase for error locations
const searchResults = []
for (const keyword of keywords) {
  const results = Grep({ pattern: keyword, path: ".", output_mode: "content", "-C": 3 })
  searchResults.push({ keyword, results })
}

// Identify affected files and functions
const affectedLocations = analyzeSearchResults(searchResults)
```
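`extractErrorKeywords` is assumed by the spec rather than defined; a minimal sketch with illustrative tokenization rules:

```javascript
// Pulls likely identifiers out of a bug description or stack trace:
// dotted paths, CamelCase names, snake_case names, and quoted strings.
function extractErrorKeywords(description) {
  const candidates = new Set()
  const pattern = /[A-Za-z_][A-Za-z0-9_]*(?:\.[A-Za-z_][A-Za-z0-9_]*)+|[A-Z][a-z]+[A-Z]\w*|\w+_\w+|"([^"]+)"|'([^']+)'/g
  for (const m of description.matchAll(pattern)) {
    candidates.add((m[1] || m[2] || m[0]).trim())
  }
  // Drop short tokens that would match everywhere; cap the search fan-out
  return [...candidates].filter(k => k.length > 3).slice(0, 10)
}
```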
**Step 1.2: Document Initial Understanding**

Create `understanding.md` with an exploration timeline:

```markdown
# Understanding Document

**Session ID**: ${sessionId}
**Bug Description**: ${bug_description}
**Started**: ${getUtc8ISOString()}

---

## Exploration Timeline

### Iteration 1 - Initial Exploration (${timestamp})

#### Current Understanding

Based on the bug description and initial code search:

- Error pattern: ${errorPattern}
- Affected areas: ${affectedLocations.map(l => l.file).join(', ')}
- Initial hypothesis: ${initialThoughts}

#### Evidence from Code Search

${searchResults.map(r => `
**Keyword: "${r.keyword}"**
- Found in: ${r.results.files.join(', ')}
- Key findings: ${r.insights}
`).join('\n')}

#### Next Steps

- Generate testable hypotheses
- Add instrumentation
- Await reproduction

---

## Current Consolidated Understanding

${initialConsolidatedUnderstanding}
```
**Step 1.3: Gemini-Assisted Hypothesis Generation**

```bash
ccw cli -p "
PURPOSE: Generate debugging hypotheses for: ${bug_description}
Success criteria: Testable hypotheses with clear evidence criteria

TASK:
• Analyze error pattern and code search results
• Identify 3-5 most likely root causes
• For each hypothesis, specify:
  - What might be wrong
  - What evidence would confirm/reject it
  - Where to add instrumentation
• Rank by likelihood

MODE: analysis

CONTEXT: @${sessionFolder}/understanding.md | Search results in understanding.md

EXPECTED:
- Structured hypothesis list (JSON format)
- Each hypothesis with: id, description, testable_condition, logging_point, evidence_criteria
- Likelihood ranking (1=most likely)

CONSTRAINTS: Focus on testable conditions
" --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause
```
Save Gemini output to `hypotheses.json`:

```json
{
  "iteration": 1,
  "timestamp": "2025-01-21T10:00:00+08:00",
  "hypotheses": [
    {
      "id": "H1",
      "description": "Data structure mismatch - expected key not present",
      "testable_condition": "Check if target key exists in dict",
      "logging_point": "file.py:func:42",
      "evidence_criteria": {
        "confirm": "data shows missing key",
        "reject": "key exists with valid value"
      },
      "likelihood": 1,
      "status": "pending"
    }
  ],
  "gemini_insights": "...",
  "corrected_assumptions": []
}
```
**Step 1.4: Add NDJSON Debug Logging**

For each hypothesis, add temporary logging statements that observe program state at key execution points. Use NDJSON format (one JSON object per line) so the log can be parsed structurally. These are read-only observations that do not alter program behavior.
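As an illustration only, instrumentation for a JavaScript target might look like the sketch below (`hid` ties each entry to a hypothesis id; field names follow the log/hypotheses examples in this document, and `settings` is a stand-in for the object under suspicion):

```javascript
const fs = require('fs')

// Appends one NDJSON entry per observation to the session's debug.log.
// debugLogPath comes from Session Setup above.
function debugLog(hid, point, data) {
  const entry = { ts: new Date().toISOString(), hid, point, data }
  fs.appendFileSync(debugLogPath, JSON.stringify(entry) + '\n')
}

// Example instrumentation for H1 at the suspected access site:
const settings = { config: null }  // illustrative object under suspicion
debugLog('H1', 'config-read', { exists: 'config' in settings, value: settings.config })
```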
**Step 1.5: Update understanding.md**

Append the hypothesis section:

```markdown
#### Hypotheses Generated (Gemini-Assisted)

${hypotheses.map(h => `
**${h.id}** (Likelihood: ${h.likelihood}): ${h.description}
- Logging at: ${h.logging_point}
- Testing: ${h.testable_condition}
- Evidence to confirm: ${h.evidence_criteria.confirm}
- Evidence to reject: ${h.evidence_criteria.reject}
`).join('\n')}

**Gemini Insights**: ${geminiInsights}
```
---

### Analyze Mode

**Step 2.1: Parse Debug Log**

```javascript
// Parse the NDJSON log
const entries = Read(debugLogPath).split('\n')
  .filter(l => l.trim())
  .map(l => JSON.parse(l))

// Group entries by hypothesis id (groupBy is sketched below)
const byHypothesis = groupBy(entries, 'hid')
```
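`groupBy` is not defined by the spec; a minimal version:

```javascript
// Buckets items by the value of the given key, e.g. { H1: [...], H2: [...] }.
function groupBy(items, key) {
  return items.reduce((acc, item) => {
    (acc[item[key]] ??= []).push(item)
    return acc
  }, {})
}
```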
**Step 2.2: Gemini-Assisted Evidence Analysis**

```bash
ccw cli -p "
PURPOSE: Analyze debug log evidence to validate/correct hypotheses for: ${bug_description}
Success criteria: Clear verdict per hypothesis + corrected understanding

TASK:
• Parse log entries by hypothesis
• Evaluate evidence against expected criteria
• Determine verdict: confirmed | rejected | inconclusive
• Identify incorrect assumptions from previous understanding
• Suggest corrections to understanding

MODE: analysis

CONTEXT:
@${debugLogPath}
@${understandingPath}
@${hypothesesPath}

EXPECTED:
- Per-hypothesis verdict with reasoning
- Evidence summary
- List of incorrect assumptions with corrections
- Updated consolidated understanding
- Root cause if confirmed, or next investigation steps

CONSTRAINTS: Evidence-based reasoning only, no speculation
" --tool gemini --mode analysis --rule analysis-diagnose-bug-root-cause
```
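When Gemini is unavailable (see Error Handling below), a local fallback for the per-hypothesis verdict might look like this sketch (the `confirms` predicate is a stand-in for the hypothesis's `evidence_criteria`):

```javascript
// Evaluates one hypothesis against its grouped log entries: no entries means
// the logging point was never hit, so the verdict stays inconclusive.
function evaluateHypothesis(hypothesis, logEntries, confirms) {
  if (!logEntries || logEntries.length === 0) {
    return { ...hypothesis, status: 'inconclusive' }
  }
  const confirmed = logEntries.some(entry => confirms(entry.data))
  return { ...hypothesis, status: confirmed ? 'confirmed' : 'rejected' }
}
```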
**Step 2.3: Update Understanding with Corrections**

Append a new iteration to `understanding.md`:

```markdown
### Iteration ${n} - Evidence Analysis (${timestamp})

#### Log Analysis Results

${results.map(r => `
**${r.id}**: ${r.verdict.toUpperCase()}
- Evidence: ${JSON.stringify(r.evidence)}
- Reasoning: ${r.reason}
`).join('\n')}

#### Corrected Understanding

Previous misunderstandings identified and corrected:

${corrections.map(c => `
- ~~${c.wrong}~~ → ${c.corrected}
  - Why wrong: ${c.reason}
  - Evidence: ${c.evidence}
`).join('\n')}

#### New Insights

${newInsights.join('\n- ')}

#### Gemini Analysis

${geminiAnalysis}

${confirmedHypothesis ? `
#### Root Cause Identified

**${confirmedHypothesis.id}**: ${confirmedHypothesis.description}

Evidence supporting this conclusion:
${confirmedHypothesis.supportingEvidence}
` : `
#### Next Steps

${nextSteps}
`}

---

## Current Consolidated Understanding (Updated)

${consolidatedUnderstanding}
```
**Step 2.4: Consolidate Understanding**

At the bottom of `understanding.md`, update the consolidated section:

- Remove or simplify proven-wrong assumptions
- Keep them in strikethrough for reference
- Focus on the current valid understanding
- Avoid repeating details from the timeline

```markdown
## Current Consolidated Understanding

### What We Know

- ${validUnderstanding1}
- ${validUnderstanding2}

### What Was Disproven

- ~~Initial assumption: ${wrongAssumption}~~ (Evidence: ${disproofEvidence})

### Current Investigation Focus

${currentFocus}

### Remaining Questions

- ${openQuestion1}
- ${openQuestion2}
```
**Step 2.5: Update hypotheses.json**

```json
{
  "iteration": 2,
  "timestamp": "2025-01-21T10:15:00+08:00",
  "hypotheses": [
    {
      "id": "H1",
      "status": "rejected",
      "verdict_reason": "Evidence shows key exists with valid value",
      "evidence": {...}
    },
    {
      "id": "H2",
      "status": "confirmed",
      "verdict_reason": "Log data confirms timing issue",
      "evidence": {...}
    }
  ],
  "gemini_corrections": [
    {
      "wrong_assumption": "...",
      "corrected_to": "...",
      "reason": "..."
    }
  ]
}
```
---

### Fix & Verification

**Step 3.1: Apply Fix**

(Same as the original debug command)

**Step 3.2: Document Resolution**

Append to `understanding.md`:

```markdown
### Iteration ${n} - Resolution (${timestamp})

#### Fix Applied

- Modified files: ${modifiedFiles.join(', ')}
- Fix description: ${fixDescription}
- Root cause addressed: ${rootCause}

#### Verification Results

${verificationResults}

#### Lessons Learned

What we learned from this debugging session:

1. ${lesson1}
2. ${lesson2}
3. ${lesson3}

#### Key Insights for Future

- ${insight1}
- ${insight2}
```

**Step 3.3: Cleanup**

Remove all temporary debug logging statements added during the investigation. Verify that no instrumentation code remains in production code.

---

## Session Folder Structure

```
.workflow/.debug/DBG-{slug}-{date}/
├── debug.log          # NDJSON log (execution evidence)
├── understanding.md   # NEW: Exploration timeline + consolidated understanding
├── hypotheses.json    # NEW: Hypothesis history with verdicts
└── resolution.md      # Optional: Final summary
```
## Understanding Document Template

```markdown
# Understanding Document

**Session ID**: DBG-xxx-2025-01-21
**Bug Description**: [original description]
**Started**: 2025-01-21T10:00:00+08:00

---

## Exploration Timeline

### Iteration 1 - Initial Exploration (2025-01-21 10:00)

#### Current Understanding
...

#### Evidence from Code Search
...

#### Hypotheses Generated (Gemini-Assisted)
...

### Iteration 2 - Evidence Analysis (2025-01-21 10:15)

#### Log Analysis Results
...

#### Corrected Understanding
- ~~[wrong]~~ → [corrected]

#### Gemini Analysis
...

---

## Current Consolidated Understanding

### What We Know
- [valid understanding points]

### What Was Disproven
- ~~[disproven assumptions]~~

### Current Investigation Focus
[current focus]

### Remaining Questions
- [open questions]
```
## Iteration Flow

```
First Call (/workflow:debug-with-file "error"):
├─ No session exists → Explore mode
├─ Extract error keywords, search codebase
├─ Document initial understanding in understanding.md
├─ Use Gemini to generate hypotheses
├─ Add logging instrumentation
└─ Await user reproduction

After Reproduction (/workflow:debug-with-file "error"):
├─ Session exists + debug.log has content → Analyze mode
├─ Parse log, use Gemini to evaluate hypotheses
├─ Update understanding.md with:
│  ├─ Evidence analysis results
│  ├─ Corrected misunderstandings (strikethrough)
│  ├─ New insights
│  └─ Updated consolidated understanding
├─ Update hypotheses.json with verdicts
└─ Decision:
   ├─ Confirmed → Fix → Document resolution
   ├─ Inconclusive → Add logging, document next steps
   └─ All rejected → Gemini-assisted new hypotheses

Output:
├─ .workflow/.debug/DBG-{slug}-{date}/debug.log
├─ .workflow/.debug/DBG-{slug}-{date}/understanding.md (evolving document)
└─ .workflow/.debug/DBG-{slug}-{date}/hypotheses.json (history)
```
## Gemini Integration Points

### 1. Hypothesis Generation (Explore Mode)

**Purpose**: Generate evidence-based, testable hypotheses

**Prompt Pattern**:
```
PURPOSE: Generate debugging hypotheses + evidence criteria
TASK: Analyze error + code → testable hypotheses with clear pass/fail criteria
CONTEXT: @understanding.md (search results)
EXPECTED: JSON with hypotheses, likelihood ranking, evidence criteria
```

### 2. Evidence Analysis (Analyze Mode)

**Purpose**: Validate hypotheses and correct misunderstandings

**Prompt Pattern**:
```
PURPOSE: Analyze debug log evidence + correct understanding
TASK: Evaluate each hypothesis → identify wrong assumptions → suggest corrections
CONTEXT: @debug.log @understanding.md @hypotheses.json
EXPECTED: Verdicts + corrections + updated consolidated understanding
```

### 3. New Hypothesis Generation (After All Rejected)

**Purpose**: Generate new hypotheses based on what was disproven

**Prompt Pattern**:
```
PURPOSE: Generate new hypotheses given disproven assumptions
TASK: Review rejected hypotheses → identify knowledge gaps → new investigation angles
CONTEXT: @understanding.md (with disproven section) @hypotheses.json
EXPECTED: New hypotheses avoiding previously rejected paths
```
|
||||
|
||||
## Error Correction Mechanism
|
||||
|
||||
### Correction Format in understanding.md
|
||||
|
||||
```markdown
|
||||
#### Corrected Understanding
|
||||
|
||||
- ~~Assumed dict key "config" was missing~~ → Key exists, but value is None
|
||||
- Why wrong: Only checked existence, not value validity
|
||||
- Evidence: H1 log shows {"config": null, "exists": true}
|
||||
|
||||
- ~~Thought error occurred in initialization~~ → Error happens during runtime update
|
||||
- Why wrong: Stack trace misread as init code
|
||||
- Evidence: H2 timestamp shows 30s after startup
|
||||
```
|
||||
|
||||
### Consolidation Rules
|
||||
|
||||
When updating "Current Consolidated Understanding":
|
||||
|
||||
1. **Simplify disproven items**: Move to "What Was Disproven" with single-line summary
|
||||
2. **Keep valid insights**: Promote confirmed findings to "What We Know"
|
||||
3. **Avoid duplication**: Don't repeat timeline details in consolidated section
|
||||
4. **Focus on current state**: What do we know NOW, not the journey
|
||||
5. **Preserve key corrections**: Keep important wrong→right transformations for learning
|
||||
|
||||
**Bad (cluttered)**:
|
||||
```markdown
|
||||
## Current Consolidated Understanding
|
||||
|
||||
In iteration 1 we thought X, but in iteration 2 we found Y, then in iteration 3...
|
||||
Also we checked A and found B, and then we checked C...
|
||||
```

**Good (consolidated)**:
```markdown
## Current Consolidated Understanding

### What We Know
- Error occurs during runtime update, not initialization
- Config value is None (not missing key)

### What Was Disproven
- ~~Initialization error~~ (Timing evidence)
- ~~Missing key hypothesis~~ (Key exists)

### Current Investigation Focus
Why is config value None during update?
```

## Post-Completion Expansion

After completion, ask the user whether to expand the work into follow-up issues (test/enhance/refactor/doc); for each selected dimension, call `/issue:new "{summary} - {dimension}"`, as sketched below.
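
A minimal sketch of this prompt, using the AskUserQuestion and SlashCommand conventions from this command suite (the `summary` variable and the shape of the returned selection are illustrative assumptions):

```javascript
// Offer expansion dimensions; any subset (or none) may be selected
const expansion = AskUserQuestion({
  questions: [{
    question: "Expand this debug session into follow-up issues?",
    header: "Expansion",
    multiSelect: true,
    options: [
      { label: "test", description: "Add regression tests for the fixed bug" },
      { label: "enhance", description: "Harden the surrounding code path" },
      { label: "refactor", description: "Clean up code touched during the fix" },
      { label: "doc", description: "Document the root cause and resolution" }
    ]
  }]
})

// One issue per selected dimension (expansion.selected is an assumed return shape)
for (const dimension of expansion.selected) {
  SlashCommand(`/issue:new "${summary} - ${dimension}"`)
}
```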

---

## Error Handling

| Situation | Action |
|-----------|--------|
| Empty debug.log | Verify reproduction triggered the code path |
| All hypotheses rejected | Use Gemini to generate new hypotheses based on disproven assumptions |
| Fix doesn't work | Document failed fix attempt, iterate with refined understanding |
| >5 iterations | Review consolidated understanding, escalate to `/workflow:lite-fix` with full context |
| Gemini unavailable | Fall back to manual hypothesis generation, document without Gemini insights |
| Understanding too long | Consolidate aggressively, archive old iterations to separate file |

## Comparison with /workflow:debug

| Feature | /workflow:debug | /workflow:debug-with-file |
|---------|-----------------|---------------------------|
| NDJSON debug logging | ✅ | ✅ |
| Hypothesis generation | Manual | Gemini-assisted |
| Exploration documentation | ❌ | ✅ understanding.md |
| Understanding evolution | ❌ | ✅ Timeline + corrections |
| Error correction | ❌ | ✅ Strikethrough + reasoning |
| Consolidated learning | ❌ | ✅ Current understanding section |
| Hypothesis history | ❌ | ✅ hypotheses.json |
| Gemini validation | ❌ | ✅ At key decision points |

## Usage Recommendations

Use `/workflow:debug-with-file` when:
- Complex bugs require multiple investigation rounds
- Learning from the debugging process is valuable
- The team needs to understand the debugging rationale
- The bug might recur, and documentation helps prevention

Use `/workflow:debug` when:
- Bugs are simple and quick
- Issues are one-off
- Documentation overhead is not needed

(File diff suppressed because it is too large.)

.claude/commands/workflow/init.md (new file, 224 lines)

---
name: init
description: Initialize project-level state with intelligent project analysis using cli-explore-agent
argument-hint: "[--regenerate]"
examples:
  - /workflow:init
  - /workflow:init --regenerate
---

# Workflow Init Command (/workflow:init)

## Overview

Initialize `.workflow/project-tech.json` and `.workflow/project-guidelines.json` with comprehensive project understanding by delegating analysis to **cli-explore-agent**.

**Dual File System**:
- `project-tech.json`: Auto-generated technical analysis (stack, architecture, components)
- `project-guidelines.json`: User-maintained rules and constraints (created as scaffold)

**Note**: This command may be called by other workflow commands. Upon completion, return immediately to continue the calling workflow without interrupting the task flow.

## Usage

```bash
/workflow:init              # Initialize (skip if exists)
/workflow:init --regenerate # Force regeneration
```

## Execution Process

```
Input Parsing:
└─ Parse --regenerate flag → regenerate = true | false

Decision:
├─ BOTH_EXIST + no --regenerate → Exit: "Already initialized"
├─ EXISTS + --regenerate → Backup existing → Continue analysis
└─ NOT_FOUND → Continue analysis

Analysis Flow:
├─ Get project metadata (name, root)
├─ Invoke cli-explore-agent
│  ├─ Structural scan (get_modules_by_depth.sh, find, wc)
│  ├─ Semantic analysis (Gemini CLI)
│  ├─ Synthesis and merge
│  └─ Write .workflow/project-tech.json
├─ Create guidelines scaffold (if not exists)
│  └─ Write .workflow/project-guidelines.json (empty structure)
└─ Display summary

Output:
├─ .workflow/project-tech.json (+ .backup if regenerate)
└─ .workflow/project-guidelines.json (scaffold if new)
```

## Implementation

### Step 1: Parse Input and Check Existing State

**Parse --regenerate flag**:
```javascript
const regenerate = $ARGUMENTS.includes('--regenerate')
```

**Check existing state**:
```bash
bash(test -f .workflow/project-tech.json && echo "TECH_EXISTS" || echo "TECH_NOT_FOUND")
bash(test -f .workflow/project-guidelines.json && echo "GUIDELINES_EXISTS" || echo "GUIDELINES_NOT_FOUND")
```

**If BOTH_EXIST and no --regenerate**: Exit early
```
Project already initialized:
- Tech analysis: .workflow/project-tech.json
- Guidelines: .workflow/project-guidelines.json

Use /workflow:init --regenerate to rebuild tech analysis
Use /workflow:session:solidify to add guidelines
Use /workflow:status --project to view state
```

### Step 2: Get Project Metadata

```bash
bash(basename "$(git rev-parse --show-toplevel 2>/dev/null || pwd)")
bash(git rev-parse --show-toplevel 2>/dev/null || pwd)
bash(mkdir -p .workflow)
```
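
The agent prompt in Step 3 interpolates `projectName` and `projectRoot`; a minimal sketch of capturing them from the commands above (the `.trim()` cleanup is an illustrative detail):

```javascript
// Capture metadata for interpolation into the Step 3 agent prompt
const projectRoot = Bash(`git rev-parse --show-toplevel 2>/dev/null || pwd`).trim()
const projectName = Bash(`basename "${projectRoot}"`).trim()
```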

### Step 3: Invoke cli-explore-agent

**For --regenerate**: Backup and preserve existing data
```bash
bash(cp .workflow/project-tech.json .workflow/project-tech.json.backup)
```

**Delegate analysis to agent**:

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description="Deep project analysis",
  prompt=`
Analyze project for workflow initialization and generate .workflow/project-tech.json.

## MANDATORY FIRST STEPS
1. Execute: cat ~/.claude/workflows/cli-templates/schemas/project-tech-schema.json (get schema reference)
2. Execute: ccw tool exec get_modules_by_depth '{}' (get project structure)

## Task
Generate complete project-tech.json following the schema structure:
- project_name: "${projectName}"
- initialized_at: ISO 8601 timestamp
- overview: {
    description: "Brief project description",
    technology_stack: {
      languages: [{name, file_count, primary}],
      frameworks: ["string"],
      build_tools: ["string"],
      test_frameworks: ["string"]
    },
    architecture: {style, layers: [], patterns: []},
    key_components: [{name, path, description, importance}]
  }
- features: []
- development_index: ${regenerate ? 'preserve from backup' : '{feature: [], enhancement: [], bugfix: [], refactor: [], docs: []}'}
- statistics: ${regenerate ? 'preserve from backup' : '{total_features: 0, total_sessions: 0, last_updated: ISO timestamp}'}
- _metadata: {initialized_by: "cli-explore-agent", analysis_timestamp: ISO timestamp, analysis_mode: "deep-scan"}

## Analysis Requirements

**Technology Stack**:
- Languages: File counts, mark primary
- Frameworks: From package.json, requirements.txt, go.mod, etc.
- Build tools: npm, cargo, maven, webpack, vite
- Test frameworks: jest, pytest, go test, junit

**Architecture**:
- Style: MVC, microservices, layered (from structure & imports)
- Layers: presentation, business-logic, data-access
- Patterns: singleton, factory, repository
- Key components: 5-10 modules {name, path, description, importance}

## Execution
1. Structural scan: get_modules_by_depth.sh, find, wc -l
2. Semantic analysis: Gemini for patterns/architecture
3. Synthesis: Merge findings
4. ${regenerate ? 'Merge with preserved development_index and statistics from .workflow/project-tech.json.backup' : ''}
5. Write JSON: Write('.workflow/project-tech.json', jsonContent)
6. Report: Return brief completion summary

Project root: ${projectRoot}
`
)
```

### Step 3.5: Create Guidelines Scaffold (if not exists)

```javascript
// Only create if not exists (never overwrite user guidelines)
if (!file_exists('.workflow/project-guidelines.json')) {
  const guidelinesScaffold = {
    conventions: {
      coding_style: [],
      naming_patterns: [],
      file_structure: [],
      documentation: []
    },
    constraints: {
      architecture: [],
      tech_stack: [],
      performance: [],
      security: []
    },
    quality_rules: [],
    learnings: [],
    _metadata: {
      created_at: new Date().toISOString(),
      version: "1.0.0"
    }
  };

  Write('.workflow/project-guidelines.json', JSON.stringify(guidelinesScaffold, null, 2));
}
```

### Step 4: Display Summary

```javascript
const projectTech = JSON.parse(Read('.workflow/project-tech.json'));
const guidelinesExists = file_exists('.workflow/project-guidelines.json');

console.log(`
✓ Project initialized successfully

## Project Overview
Name: ${projectTech.project_name}
Description: ${projectTech.overview.description}

### Technology Stack
Languages: ${projectTech.overview.technology_stack.languages.map(l => l.name).join(', ')}
Frameworks: ${projectTech.overview.technology_stack.frameworks.join(', ')}

### Architecture
Style: ${projectTech.overview.architecture.style}
Components: ${projectTech.overview.key_components.length} core modules

---
Files created:
- Tech analysis: .workflow/project-tech.json
- Guidelines: .workflow/project-guidelines.json ${guidelinesExists ? '(scaffold)' : ''}
${regenerate ? '- Backup: .workflow/project-tech.json.backup' : ''}

Next steps:
- Use /workflow:session:solidify to add project guidelines
- Use /workflow:plan to start planning
`);
```

## Error Handling

**Agent Failure**: Fall back to basic initialization with placeholder overview
**Missing Tools**: Agent uses Qwen fallback or bash-only
**Empty Project**: Create minimal JSON with all gaps identified

.claude/commands/workflow/lite-execute.md (new file, 743 lines)

---
name: lite-execute
description: Execute tasks based on in-memory plan, prompt description, or file content
argument-hint: "[-y|--yes] [--in-memory] [\"task description\"|file-path]"
allowed-tools: TodoWrite(*), Task(*), Bash(*)
---

# Workflow Lite-Execute Command (/workflow:lite-execute)

## Overview

Flexible task execution command supporting three input modes: in-memory plan (from lite-plan), direct prompt description, or file content. Handles execution orchestration, progress tracking, and optional code review.

**Core capabilities:**
- Multi-mode input (in-memory plan, prompt description, or file path)
- Execution orchestration (Agent or Codex) with full context
- Live progress tracking via TodoWrite at execution call level
- Optional code review with selected tool (Gemini, Agent, or custom)
- Context continuity across multiple executions
- Intelligent format detection (Enhanced Task JSON vs plain text)

## Usage

### Command Syntax
```bash
/workflow:lite-execute [FLAGS] <INPUT>

# Flags
--in-memory    Use plan from memory (called by lite-plan)

# Arguments
<input>        Task description string, or path to file (required)
```

## Input Modes

### Mode 1: In-Memory Plan

**Trigger**: Called by lite-plan after Phase 4 approval with `--in-memory` flag

**Input Source**: `executionContext` global variable set by lite-plan

**Content**: Complete execution context (see Data Structures section)

**Behavior**:
- Skip execution method selection (already set by lite-plan)
- Directly proceed to execution with full context
- All planning artifacts available (exploration, clarifications, plan)

### Mode 2: Prompt Description

**Trigger**: User calls with task description string

**Input**: Simple task description (e.g., "Add unit tests for auth module")

**Behavior**:
- Store prompt as `originalUserInput`
- Create simple execution plan from prompt (see the sketch after this list)
- AskUserQuestion: Select execution method (Agent/Codex/Auto)
- AskUserQuestion: Select code review tool (Skip/Gemini/Agent/Other)
- Proceed to execution with `originalUserInput` included
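
A minimal sketch of the simple plan built from a bare prompt, assuming the `planObject` shape documented in the Data Structures section (the single-task breakdown and the Low-complexity default are illustrative):

```javascript
// Wrap a bare prompt in the smallest plan shape lite-execute understands
function planFromPrompt(prompt) {
  return {
    summary: prompt,
    approach: "Direct implementation of the user's request",
    complexity: "Low",            // assumption: bare prompts default to Low
    estimated_time: "unknown",
    recommended_execution: "Auto",
    tasks: [{ id: "T1", title: prompt.slice(0, 60), description: prompt, depends_on: [] }]
  }
}

planObject = planFromPrompt(originalUserInput)
```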

**User Interaction**:
```javascript
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

let userSelection

if (autoYes) {
  // Auto mode: Use defaults
  console.log(`[--yes] Auto-confirming execution:`)
  console.log(`  - Execution method: Auto`)
  console.log(`  - Code review: Skip`)

  userSelection = {
    execution_method: "Auto",
    code_review_tool: "Skip"
  }
} else {
  // Interactive mode: Ask user
  userSelection = AskUserQuestion({
    questions: [
      {
        question: "Select execution method:",
        header: "Execution",
        multiSelect: false,
        options: [
          { label: "Agent", description: "@code-developer agent" },
          { label: "Codex", description: "codex CLI tool" },
          { label: "Auto", description: "Auto-select based on complexity" }
        ]
      },
      {
        question: "Enable code review after execution?",
        header: "Code Review",
        multiSelect: false,
        options: [
          { label: "Skip", description: "No review" },
          { label: "Gemini Review", description: "Gemini CLI tool" },
          { label: "Codex Review", description: "Git-aware review (prompt OR --uncommitted)" },
          { label: "Agent Review", description: "Current agent review" }
        ]
      }
    ]
  })
}
```

### Mode 3: File Content

**Trigger**: User calls with file path

**Input**: Path to file containing task description or plan.json

**Step 1: Read and Detect Format**

```javascript
fileContent = Read(filePath)

// Attempt JSON parsing
try {
  jsonData = JSON.parse(fileContent)

  // Check if plan.json from lite-plan session
  if (jsonData.summary && jsonData.approach && jsonData.tasks) {
    planObject = jsonData
    originalUserInput = jsonData.summary
    isPlanJson = true
  } else {
    // Valid JSON but not plan.json - treat as plain text
    originalUserInput = fileContent
    isPlanJson = false
  }
} catch {
  // Not valid JSON - treat as plain text prompt
  originalUserInput = fileContent
  isPlanJson = false
}
```

**Step 2: Create Execution Plan**

If `isPlanJson === true`:
- Use `planObject` directly
- User selects execution method and code review

If `isPlanJson === false`:
- Treat file content as prompt (same behavior as Mode 2)
- Create simple execution plan from content

**Step 3: User Interaction**

- AskUserQuestion: Select execution method (Agent/Codex/Auto)
- AskUserQuestion: Select code review tool
- Proceed to execution with full context

## Execution Process

```
Input Parsing:
└─ Decision (mode detection):
   ├─ --in-memory flag → Mode 1: Load executionContext → Skip user selection
   ├─ Ends with .md/.json/.txt → Mode 3: Read file → Detect format
   │  ├─ Valid plan.json → Use planObject → User selects method + review
   │  └─ Not plan.json → Treat as prompt → User selects method + review
   └─ Other → Mode 2: Prompt description → User selects method + review

Execution:
├─ Step 1: Initialize result tracking (previousExecutionResults = [])
├─ Step 2: Task grouping & batch creation
│  ├─ Extract explicit depends_on (no file/keyword inference)
│  ├─ Group: independent tasks → single parallel batch (maximize utilization)
│  ├─ Group: dependent tasks → sequential phases (respect dependencies)
│  └─ Create TodoWrite list for batches
├─ Step 3: Launch execution
│  ├─ Phase 1: All independent tasks (⚡ single batch, concurrent)
│  └─ Phase 2+: Dependent tasks by dependency order
├─ Step 4: Track progress (TodoWrite updates per batch)
└─ Step 5: Code review (if codeReviewTool ≠ "Skip")

Output:
└─ Execution complete with results in previousExecutionResults[]
```

## Detailed Execution Steps

### Step 1: Initialize Execution Tracking

**Operations**:
- Initialize result tracking for multi-execution scenarios
- Set up `previousExecutionResults` array for context continuity
- **In-Memory Mode**: Echo execution strategy from lite-plan for transparency

```javascript
// Initialize result tracking
previousExecutionResults = []

// In-Memory Mode: Echo execution strategy (transparency before execution)
if (executionContext) {
  console.log(`
📋 Execution Strategy (from lite-plan):
  Method: ${executionContext.executionMethod}
  Review: ${executionContext.codeReviewTool}
  Tasks: ${executionContext.planObject.tasks.length}
  Complexity: ${executionContext.planObject.complexity}
${executionContext.executorAssignments ? `  Assignments: ${JSON.stringify(executionContext.executorAssignments)}` : ''}
`)
}
```

### Step 2: Task Grouping & Batch Creation

**Dependency Analysis & Grouping Algorithm**:
```javascript
// Use explicit depends_on from plan.json (no inference from file/keywords)
function extractDependencies(tasks) {
  const taskIdToIndex = {}
  tasks.forEach((t, i) => { taskIdToIndex[t.id] = i })

  return tasks.map((task, i) => {
    // Only use explicit depends_on from plan.json
    const deps = (task.depends_on || [])
      .map(depId => taskIdToIndex[depId])
      .filter(idx => idx !== undefined && idx < i)
    return { ...task, taskIndex: i, dependencies: deps }
  })
}

// Group into batches: maximize parallel execution
function createExecutionCalls(tasks, executionMethod) {
  const tasksWithDeps = extractDependencies(tasks)
  const processed = new Set()
  const calls = []

  // Phase 1: All independent tasks → single parallel batch (maximize utilization)
  const independentTasks = tasksWithDeps.filter(t => t.dependencies.length === 0)
  if (independentTasks.length > 0) {
    independentTasks.forEach(t => processed.add(t.taskIndex))
    calls.push({
      method: executionMethod,
      executionType: "parallel",
      groupId: "P1",
      taskSummary: independentTasks.map(t => t.title).join(' | '),
      tasks: independentTasks
    })
  }

  // Phase 2: Dependent tasks → sequential batches (respect dependencies)
  let sequentialIndex = 1
  let remaining = tasksWithDeps.filter(t => !processed.has(t.taskIndex))

  while (remaining.length > 0) {
    // Find tasks whose dependencies are all satisfied
    const ready = remaining.filter(t =>
      t.dependencies.every(d => processed.has(d))
    )

    if (ready.length === 0) {
      console.warn('Circular dependency detected, forcing remaining tasks')
      ready.push(...remaining)
    }

    // Group ready tasks (can run in parallel within this phase)
    ready.forEach(t => processed.add(t.taskIndex))
    calls.push({
      method: executionMethod,
      executionType: ready.length > 1 ? "parallel" : "sequential",
      groupId: ready.length > 1 ? `P${calls.length + 1}` : `S${sequentialIndex++}`,
      taskSummary: ready.map(t => t.title).join(ready.length > 1 ? ' | ' : ' → '),
      tasks: ready
    })

    remaining = remaining.filter(t => !processed.has(t.taskIndex))
  }

  return calls
}

executionCalls = createExecutionCalls(planObject.tasks, executionMethod).map(c => ({ ...c, id: `[${c.groupId}]` }))

TodoWrite({
  todos: executionCalls.map(c => ({
    content: `${c.executionType === "parallel" ? "⚡" : "→"} ${c.id} (${c.tasks.length} tasks)`,
    status: "pending",
    activeForm: `Executing ${c.id}`
  }))
})
```

### Step 3: Launch Execution

**Executor Resolution** (task-level executor takes precedence over the global setting):
```javascript
// Resolve a task's executor (prefer executorAssignments, fall back to global executionMethod)
function getTaskExecutor(task) {
  const assignments = executionContext?.executorAssignments || {}
  if (assignments[task.id]) {
    return assignments[task.id].executor // 'gemini' | 'codex' | 'agent'
  }
  // Fallback: map the global executionMethod
  const method = executionContext?.executionMethod || 'Auto'
  if (method === 'Agent') return 'agent'
  if (method === 'Codex') return 'codex'
  // Auto: decide by complexity
  return planObject.complexity === 'Low' ? 'agent' : 'codex'
}

// Group tasks by executor
function groupTasksByExecutor(tasks) {
  const groups = { gemini: [], codex: [], agent: [] }
  tasks.forEach(task => {
    const executor = getTaskExecutor(task)
    groups[executor].push(task)
  })
  return groups
}
```

**Execution Flow**: Parallel batches concurrently → Sequential batches in order (`executeBatch` is sketched after this block)
```javascript
const parallel = executionCalls.filter(c => c.executionType === "parallel")
const sequential = executionCalls.filter(c => c.executionType === "sequential")

// Phase 1: Launch all parallel batches (single message with multiple tool calls)
if (parallel.length > 0) {
  TodoWrite({ todos: executionCalls.map(c => ({ status: c.executionType === "parallel" ? "in_progress" : "pending" })) })
  parallelResults = await Promise.all(parallel.map(c => executeBatch(c)))
  previousExecutionResults.push(...parallelResults)
  TodoWrite({ todos: executionCalls.map(c => ({ status: parallel.includes(c) ? "completed" : "pending" })) })
}

// Phase 2: Execute sequential batches one by one
const finished = new Set(parallel)
for (const call of sequential) {
  TodoWrite({ todos: executionCalls.map(c => ({ status: c === call ? "in_progress" : (finished.has(c) ? "completed" : "pending") })) })
  result = await executeBatch(call)
  previousExecutionResults.push(result)
  finished.add(call)
  TodoWrite({ todos: executionCalls.map(c => ({ status: finished.has(c) ? "completed" : "pending" })) })
}
```
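
`executeBatch` is not defined in this document; a minimal sketch, assuming it dispatches each batch to the executor resolved above (Options A/B/C below) and returns an `executionResult`-shaped object whose status would be refined from actual output in practice:

```javascript
// Dispatch one batch to its resolved executor (assumes batches are executor-homogeneous)
async function executeBatch(batch) {
  const executor = getTaskExecutor(batch.tasks[0])
  const prompt = buildExecutionPrompt(batch)

  if (executor === 'agent') {
    Task(subagent_type="code-developer", run_in_background=false,
         description=batch.taskSummary, prompt=prompt)
  } else {
    const mode = executor === 'gemini' ? 'analysis' : 'write'
    Bash(command=`ccw cli -p "${prompt}" --tool ${executor} --mode ${mode} --id ${sessionId}-${batch.groupId}`,
         run_in_background=true)
  }

  return {
    executionId: batch.id,
    status: "completed",           // placeholder: refine from agent/CLI output
    tasksSummary: batch.taskSummary,
    completionSummary: "", keyOutputs: "", notes: "",
    fixedCliId: executor === 'agent' ? null : `${sessionId}-${batch.groupId}`
  }
}
```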

### Unified Task Prompt Builder

**Task Formatting Principle**: Each task is a self-contained checklist. The executor only needs to know what THIS task requires. The same template is used for Agent and CLI.

```javascript
function buildExecutionPrompt(batch) {
  // Task template (6 parts: Modification Points → Why → How → Reference → Risks → Done)
  const formatTask = (t) => `
## ${t.title}

**Scope**: \`${t.scope}\` | **Action**: ${t.action}

### Modification Points
${t.modification_points.map(p => `- **${p.file}** → \`${p.target}\`: ${p.change}`).join('\n')}

${t.rationale ? `
### Why this approach (Medium/High)
${t.rationale.chosen_approach}
${t.rationale.decision_factors?.length > 0 ? `\nKey factors: ${t.rationale.decision_factors.join(', ')}` : ''}
${t.rationale.tradeoffs ? `\nTradeoffs: ${t.rationale.tradeoffs}` : ''}
` : ''}

### How to do it
${t.description}

${t.implementation.map(step => `- ${step}`).join('\n')}

${t.code_skeleton ? `
### Code skeleton (High)
${t.code_skeleton.interfaces?.length > 0 ? `**Interfaces**: ${t.code_skeleton.interfaces.map(i => `\`${i.name}\` - ${i.purpose}`).join(', ')}` : ''}
${t.code_skeleton.key_functions?.length > 0 ? `\n**Functions**: ${t.code_skeleton.key_functions.map(f => `\`${f.signature}\` - ${f.purpose}`).join(', ')}` : ''}
${t.code_skeleton.classes?.length > 0 ? `\n**Classes**: ${t.code_skeleton.classes.map(c => `\`${c.name}\` - ${c.purpose}`).join(', ')}` : ''}
` : ''}

### Reference
- Pattern: ${t.reference?.pattern || 'N/A'}
- Files: ${t.reference?.files?.join(', ') || 'N/A'}
${t.reference?.examples ? `- Notes: ${t.reference.examples}` : ''}

${t.risks?.length > 0 ? `
### Risk mitigations (High)
${t.risks.map(r => `- ${r.description} → **${r.mitigation}**`).join('\n')}
` : ''}

### Done when
${t.acceptance.map(c => `- [ ] ${c}`).join('\n')}
${t.verification?.success_metrics?.length > 0 ? `\n**Success metrics**: ${t.verification.success_metrics.join(', ')}` : ''}`

  // Build prompt
  const sections = []

  if (originalUserInput) sections.push(`## Goal\n${originalUserInput}`)

  sections.push(`## Tasks\n${batch.tasks.map(formatTask).join('\n\n---\n')}`)

  // Context (reference only)
  const context = []
  if (previousExecutionResults.length > 0) {
    context.push(`### Previous Work\n${previousExecutionResults.map(r => `- ${r.tasksSummary}: ${r.status}`).join('\n')}`)
  }
  if (clarificationContext) {
    context.push(`### Clarifications\n${Object.entries(clarificationContext).map(([q, a]) => `- ${q}: ${a}`).join('\n')}`)
  }
  if (executionContext?.planObject?.data_flow?.diagram) {
    context.push(`### Data Flow\n${executionContext.planObject.data_flow.diagram}`)
  }
  if (executionContext?.session?.artifacts?.plan) {
    context.push(`### Artifacts\nPlan: ${executionContext.session.artifacts.plan}`)
  }
  // Project guidelines (user-defined constraints from /workflow:session:solidify)
  context.push(`### Project Guidelines\n@.workflow/project-guidelines.json`)
  if (context.length > 0) sections.push(`## Context\n${context.join('\n\n')}`)

  sections.push(`Complete each task according to its "Done when" checklist.`)

  return sections.join('\n\n')
}
```

**Option A: Agent Execution**

When to use:
- `getTaskExecutor(task) === "agent"`
- or `executionMethod = "Agent"` (global fallback)
- or `executionMethod = "Auto" AND complexity = "Low"` (global fallback)

```javascript
Task(
  subagent_type="code-developer",
  run_in_background=false,
  description=batch.taskSummary,
  prompt=buildExecutionPrompt(batch)
)
```

**Result Collection**: After completion, collect the result following the `executionResult` structure (see Data Structures section)

**Option B: CLI Execution (Codex)**

When to use:
- `getTaskExecutor(task) === "codex"`
- or `executionMethod = "Codex"` (global fallback)
- or `executionMethod = "Auto" AND complexity = "Medium/High"` (global fallback)

```bash
ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write
```

**Execution with fixed IDs** (predictable ID pattern):
```javascript
// Launch CLI in background, wait for task hook callback
// Generate fixed execution ID: ${sessionId}-${groupId}
const sessionId = executionContext?.session?.id || 'standalone'
const fixedExecutionId = `${sessionId}-${batch.groupId}` // e.g., "implement-auth-2025-12-13-P1"

// Check if resuming from previous failed execution
const previousCliId = batch.resumeFromCliId || null

// Build command with fixed ID (and optional resume for continuation)
const cli_command = previousCliId
  ? `ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write --id ${fixedExecutionId} --resume ${previousCliId}`
  : `ccw cli -p "${buildExecutionPrompt(batch)}" --tool codex --mode write --id ${fixedExecutionId}`

// Execute in background, stop output and wait for task hook callback
Bash(
  command=cli_command,
  run_in_background=true
)
// STOP HERE - CLI executes in background, task hook will notify on completion
```

**Resume on Failure** (with fixed ID):
```javascript
// If execution failed or timed out, offer resume option
if (bash_result.status === 'failed' || bash_result.status === 'timeout') {
  console.log(`
⚠️ Execution incomplete. Resume available:
  Fixed ID: ${fixedExecutionId}
  Lookup: ccw cli detail ${fixedExecutionId}
  Resume: ccw cli -p "Continue tasks" --resume ${fixedExecutionId} --tool codex --mode write --id ${fixedExecutionId}-retry
`)

  // Store for potential retry in same session
  batch.resumeFromCliId = fixedExecutionId
}
```

**Result Collection**: After completion, analyze the output and collect the result following the `executionResult` structure (include `cliExecutionId` for resume capability)

**Option C: CLI Execution (Gemini)**

When to use: `getTaskExecutor(task) === "gemini"` (analysis-type tasks)

```bash
# Use the unified buildExecutionPrompt; only the tool and mode switch
ccw cli -p "${buildExecutionPrompt(batch)}" --tool gemini --mode analysis --id ${sessionId}-${batch.groupId}
```

### Step 4: Progress Tracking

Progress is tracked at batch level (not individual task level). Icons: ⚡ (parallel, concurrent), → (sequential, one-by-one). An illustrative snapshot follows.
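
For illustration, a run with one parallel and one sequential batch might surface as the following snapshot (values hypothetical, format from Step 2):

```javascript
TodoWrite({
  todos: [
    { content: "⚡ [P1] (3 tasks)", status: "completed", activeForm: "Executing [P1]" },
    { content: "→ [S1] (1 tasks)", status: "in_progress", activeForm: "Executing [S1]" }
  ]
})
```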

### Step 5: Code Review (Optional)

**Skip Condition**: Only run if `codeReviewTool ≠ "Skip"`

**Review Focus**: Verify implementation against plan acceptance criteria and verification requirements
- Read plan.json for task acceptance criteria and verification checklist
- Check each acceptance criterion is fulfilled
- Verify success metrics from verification field (Medium/High complexity)
- Run unit/integration tests specified in verification field
- Validate code quality and identify issues
- Ensure alignment with planned approach and risk mitigations

**Operations**:
- Agent Review: Current agent performs direct review
- Gemini Review: Execute gemini CLI with review prompt
- Codex Review: Two options - (A) with prompt for complex reviews, (B) `--uncommitted` flag only for quick reviews
- Custom tool: Execute specified CLI tool (qwen, etc.)

**Unified Review Template** (All tools use same standard):

**Review Criteria**:
- **Acceptance Criteria**: Verify each criterion from plan.tasks[].acceptance
- **Verification Checklist** (Medium/High): Check unit_tests, integration_tests, success_metrics from plan.tasks[].verification
- **Code Quality**: Analyze quality, identify issues, suggest improvements
- **Plan Alignment**: Validate implementation matches planned approach and risk mitigations

**Shared Prompt Template** (used by all CLI tools):
```
PURPOSE: Code review for implemented changes against plan acceptance criteria and verification requirements
TASK: • Verify plan acceptance criteria fulfillment • Check verification requirements (unit tests, success metrics) • Analyze code quality • Identify issues • Suggest improvements • Validate plan adherence and risk mitigations
MODE: analysis
CONTEXT: @**/* @{plan.json} [@{exploration.json}] | Memory: Review lite-execute changes against plan requirements including verification checklist
EXPECTED: Quality assessment with:
- Acceptance criteria verification (all tasks)
- Verification checklist validation (Medium/High: unit_tests, integration_tests, success_metrics)
- Issue identification
- Recommendations
Explicitly check each acceptance criterion and verification item from plan.json tasks.
CONSTRAINTS: Focus on plan acceptance criteria, verification requirements, and plan adherence | analysis=READ-ONLY
```

**Tool-Specific Execution** (Apply shared prompt template above):

```bash
# Method 1: Agent Review (current agent)
# - Read plan.json: ${executionContext.session.artifacts.plan}
# - Apply unified review criteria (see Shared Prompt Template)
# - Report findings directly

# Method 2: Gemini Review (recommended)
ccw cli -p "[Shared Prompt Template with artifacts]" --tool gemini --mode analysis
# CONTEXT includes: @**/* @${plan.json} [@${exploration.json}]

# Method 3: Qwen Review (alternative)
ccw cli -p "[Shared Prompt Template with artifacts]" --tool qwen --mode analysis
# Same prompt as Gemini, different execution engine

# Method 4: Codex Review (git-aware) - Two mutually exclusive options:

# Option A: With custom prompt (reviews uncommitted by default)
ccw cli -p "[Shared Prompt Template with artifacts]" --tool codex --mode review
# Use for complex reviews with specific focus areas

# Option B: Target flag only (no prompt allowed)
ccw cli --tool codex --mode review --uncommitted
# Quick review of uncommitted changes without custom instructions

# ⚠️ IMPORTANT: -p prompt and target flags (--uncommitted/--base/--commit) are MUTUALLY EXCLUSIVE
```

**Multi-Round Review with Fixed IDs**:
```javascript
// Generate fixed review ID
const reviewId = `${sessionId}-review`

// First review pass with fixed ID
const reviewResult = Bash(`ccw cli -p "[Review prompt]" --tool gemini --mode analysis --id ${reviewId}`)

// If issues found, continue review dialog with fixed ID chain
// (hasUnresolvedIssues: helper, not shown, that scans review output for open findings)
if (hasUnresolvedIssues(reviewResult)) {
  // Resume with follow-up questions
  Bash(`ccw cli -p "Clarify the security concerns you mentioned" --resume ${reviewId} --tool gemini --mode analysis --id ${reviewId}-followup`)
}
```

**Implementation Note**: Replace the `[Shared Prompt Template with artifacts]` placeholder with the actual template content, substituting:
- `@{plan.json}` → `@${executionContext.session.artifacts.plan}`
- `[@{exploration.json}]` → exploration files from artifacts (if exists)

### Step 6: Update Development Index

**Trigger**: After all executions complete (regardless of code review)

**Skip Condition**: Skip if `.workflow/project-tech.json` does not exist

**Operations**:
```javascript
const projectJsonPath = '.workflow/project-tech.json'
if (!fileExists(projectJsonPath)) return // Silent skip

const projectJson = JSON.parse(Read(projectJsonPath))

// Initialize if needed
if (!projectJson.development_index) {
  projectJson.development_index = { feature: [], enhancement: [], bugfix: [], refactor: [], docs: [] }
}

// Detect category from keywords
function detectCategory(text) {
  text = text.toLowerCase()
  if (/\b(fix|bug|error|issue|crash)\b/.test(text)) return 'bugfix'
  if (/\b(refactor|cleanup|reorganize)\b/.test(text)) return 'refactor'
  if (/\b(doc|readme|comment)\b/.test(text)) return 'docs'
  if (/\b(add|new|create|implement)\b/.test(text)) return 'feature'
  return 'enhancement'
}

// Detect sub_feature from task file paths
function detectSubFeature(tasks) {
  const dirs = tasks.map(t => t.file?.split('/').slice(-2, -1)[0]).filter(Boolean)
  const counts = dirs.reduce((a, d) => { a[d] = (a[d] || 0) + 1; return a }, {})
  return Object.entries(counts).sort((a, b) => b[1] - a[1])[0]?.[0] || 'general'
}

const category = detectCategory(`${planObject.summary} ${planObject.approach}`)
const entry = {
  title: planObject.summary.slice(0, 60),
  sub_feature: detectSubFeature(planObject.tasks),
  date: new Date().toISOString().split('T')[0],
  description: planObject.approach.slice(0, 100),
  status: previousExecutionResults.every(r => r.status === 'completed') ? 'completed' : 'partial',
  session_id: executionContext?.session?.id || null
}

projectJson.development_index[category].push(entry)
projectJson.statistics.last_updated = new Date().toISOString()
Write(projectJsonPath, JSON.stringify(projectJson, null, 2))

console.log(`✓ Development index: [${category}] ${entry.title}`)
```

## Best Practices

**Input Modes**: In-memory (lite-plan), prompt (standalone), file (JSON/text)
**Task Grouping**: Based on explicit depends_on only; independent tasks run in a single parallel batch
**Execution**: All independent tasks launch concurrently via a single Claude message with multiple tool calls

## Error Handling

| Error | Cause | Resolution |
|-------|-------|------------|
| Missing executionContext | --in-memory without context | Error: "No execution context found. Only available when called by lite-plan." |
| File not found | File path doesn't exist | Error: "File not found: {path}. Check file path." |
| Empty file | File exists but no content | Error: "File is empty: {path}. Provide task description." |
| Invalid Enhanced Task JSON | JSON missing required fields | Warning: "Missing required fields. Treating as plain text." |
| Malformed JSON | JSON parsing fails | Treat as plain text (expected for non-JSON files) |
| Execution failure | Agent/Codex crashes | Display error, use fixed ID `${sessionId}-${groupId}` for resume: `ccw cli -p "Continue" --resume <fixed-id> --id <fixed-id>-retry` |
| Execution timeout | CLI exceeded timeout | Use fixed ID for resume with extended timeout |
| Codex unavailable | Codex not installed | Show installation instructions, offer Agent execution |
| Fixed ID not found | Custom ID lookup failed | Check `ccw cli history`, verify date directories |

## Data Structures

### executionContext (Input - Mode 1)

Passed from lite-plan via global variable:

```javascript
{
  planObject: {
    summary: string,
    approach: string,
    tasks: [...],
    estimated_time: string,
    recommended_execution: string,
    complexity: string
  },
  explorationsContext: {...} | null,  // Multi-angle explorations
  explorationAngles: string[],        // List of exploration angles
  explorationManifest: {...} | null,  // Exploration manifest
  clarificationContext: {...} | null,
  executionMethod: "Agent" | "Codex" | "Auto", // Global default
  codeReviewTool: "Skip" | "Gemini Review" | "Agent Review" | string,
  originalUserInput: string,

  // Task-level executor assignments (take precedence over executionMethod)
  executorAssignments: {
    [taskId]: { executor: "gemini" | "codex" | "agent", reason: string }
  },

  // Session artifacts location (saved by lite-plan)
  session: {
    id: string,     // Session identifier: {taskSlug}-{shortTimestamp}
    folder: string, // Session folder path: .workflow/.lite-plan/{session-id}
    artifacts: {
      explorations: [{angle, path}],  // exploration-{angle}.json paths
      explorations_manifest: string,  // explorations-manifest.json path
      plan: string                    // plan.json path (always present)
    }
  }
}
```

**Artifact Usage**:
- Artifact files contain detailed planning context
- Pass artifact paths to CLI tools and agents for enhanced context
- See the execution options above for usage examples

### executionResult (Output)

Collected after each execution call completes:

```javascript
{
  executionId: string,       // e.g., "[Agent-1]", "[Codex-1]"
  status: "completed" | "partial" | "failed",
  tasksSummary: string,      // Brief description of tasks handled
  completionSummary: string, // What was completed
  keyOutputs: string,        // Files created/modified, key changes
  notes: string,             // Important context for next execution
  fixedCliId: string | null  // Fixed CLI execution ID (e.g., "implement-auth-2025-12-13-P1")
}
```

Appended to the `previousExecutionResults` array for context continuity in multi-execution scenarios.

## Post-Completion Expansion

After completion, ask the user whether to expand the work into follow-up issues (test/enhance/refactor/doc); for each selected dimension, call `/issue:new "{summary} - {dimension}"`.

**Fixed ID Pattern**: `${sessionId}-${groupId}` enables predictable lookup without auto-generated timestamps.

**Resume Usage**: If `status` is "partial" or "failed", use `fixedCliId` to resume:
```bash
# Lookup previous execution
ccw cli detail ${fixedCliId}

# Resume with new fixed ID for retry
ccw cli -p "Continue from where we left off" --resume ${fixedCliId} --tool codex --mode write --id ${fixedCliId}-retry
```

.claude/commands/workflow/lite-fix.md (new file, 780 lines)

---
name: lite-fix
description: Lightweight bug diagnosis and fix workflow with intelligent severity assessment and optional hotfix mode for production incidents
argument-hint: "[-y|--yes] [--hotfix] \"bug description or issue reference\""
allowed-tools: TodoWrite(*), Task(*), SlashCommand(*), AskUserQuestion(*)
---

# Workflow Lite-Fix Command (/workflow:lite-fix)

## Overview

Intelligent lightweight bug-fixing command with dynamic workflow adaptation based on severity assessment. Focuses on the diagnosis phases (root cause analysis, impact assessment, fix planning, confirmation) and delegates execution to `/workflow:lite-execute`.

**Core capabilities:**
- Intelligent bug analysis with automatic severity detection
- Dynamic code diagnosis (cli-explore-agent) for root cause identification
- Interactive clarification after diagnosis to gather missing information
- Adaptive fix planning strategy (direct Claude vs cli-lite-planning-agent) based on complexity
- Two-step confirmation: fix-plan display -> multi-dimensional input collection
- Execution handoff with complete context to lite-execute

## Usage

```bash
/workflow:lite-fix [FLAGS] <BUG_DESCRIPTION>

# Flags
-y, --yes      Skip all confirmations (auto mode)
--hotfix, -h   Production hotfix mode (minimal diagnosis, fast fix)

# Arguments
<bug-description>  Bug description, error message, or path to .md file (required)

# Examples
/workflow:lite-fix "User login fails"                                    # Interactive mode
/workflow:lite-fix --yes "User login fails"                              # Auto mode (no confirmations)
/workflow:lite-fix -y --hotfix "Production database connection failure"  # Auto + hotfix mode
```

## Auto Mode Defaults

When `--yes` or `-y` flag is used:
- **Clarification Questions**: Skipped (no clarification phase)
- **Fix Plan Confirmation**: Auto-selected "Allow"
- **Execution Method**: Auto-selected "Auto"
- **Code Review**: Auto-selected "Skip"
- **Severity**: Uses auto-detected severity (no manual override)
- **Hotfix Mode**: Respects --hotfix flag if present, otherwise normal mode

**Flag Parsing**:
```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
const hotfixMode = $ARGUMENTS.includes('--hotfix') || $ARGUMENTS.includes('-h')
```

## Execution Process

```
Phase 1: Bug Analysis & Diagnosis
|- Parse input (description, error message, or .md file)
|- Intelligent severity pre-assessment (Low/Medium/High/Critical)
|- Diagnosis decision (auto-detect or --hotfix flag)
|- Context protection: If file reading >=50k chars -> force cli-explore-agent
+- Decision:
   |- needsDiagnosis=true -> Launch parallel cli-explore-agents (1-4 based on severity)
   +- needsDiagnosis=false (hotfix) -> Skip directly to Phase 3 (Fix Planning)

Phase 2: Clarification (optional, multi-round)
|- Aggregate clarification_needs from all diagnosis angles
|- Deduplicate similar questions
+- Decision:
   |- Has clarifications -> AskUserQuestion (max 4 questions per round, multiple rounds allowed)
   +- No clarifications -> Skip to Phase 3

Phase 3: Fix Planning (NO CODE EXECUTION - planning only)
+- Decision (based on Phase 1 severity):
   |- Low/Medium -> Load schema: cat ~/.claude/workflows/cli-templates/schemas/fix-plan-json-schema.json -> Direct Claude planning (following schema) -> fix-plan.json -> MUST proceed to Phase 4
   +- High/Critical -> cli-lite-planning-agent -> fix-plan.json -> MUST proceed to Phase 4

Phase 4: Confirmation & Selection
|- Display fix-plan summary (tasks, severity, estimated time)
+- AskUserQuestion:
   |- Confirm: Allow / Modify / Cancel
   |- Execution: Agent / Codex / Auto
   +- Review: Gemini / Agent / Skip

Phase 5: Execute
|- Build executionContext (fix-plan + diagnoses + clarifications + selections)
+- SlashCommand("/workflow:lite-execute --in-memory --mode bugfix")
```

## Implementation

### Phase 1: Intelligent Multi-Angle Diagnosis

**Session Setup** (MANDATORY - follow exactly):
```javascript
// Helper: Get UTC+8 (China Standard Time) ISO string
const getUtc8ISOString = () => new Date(Date.now() + 8 * 60 * 60 * 1000).toISOString()

const bugSlug = bug_description.toLowerCase().replace(/[^a-z0-9]+/g, '-').substring(0, 40)
const dateStr = getUtc8ISOString().substring(0, 10) // Format: 2025-11-29

const sessionId = `${bugSlug}-${dateStr}` // e.g., "user-avatar-upload-fails-2025-11-29"
const sessionFolder = `.workflow/.lite-fix/${sessionId}`

bash(`mkdir -p ${sessionFolder} && test -d ${sessionFolder} && echo "SUCCESS: ${sessionFolder}" || echo "FAILED: ${sessionFolder}"`)
```

**Diagnosis Decision Logic**:
```javascript
const hotfixMode = $ARGUMENTS.includes('--hotfix') || $ARGUMENTS.includes('-h')

needsDiagnosis = (
  !hotfixMode &&
  (
    bug.lacks_specific_error_message ||
    bug.requires_codebase_context ||
    bug.needs_execution_tracing ||
    bug.root_cause_unclear
  )
)

if (!needsDiagnosis) {
  // Skip to Phase 2 (Clarification) or Phase 3 (Fix Planning)
  proceed_to_next_phase()
}
```

**Context Protection**: File reading >=50k chars -> force `needsDiagnosis=true` (delegate to cli-explore-agent)

**Severity Pre-Assessment** (Intelligent Analysis):
```javascript
// Analyzes bug severity based on:
// - Symptoms: Error messages, crash reports, user complaints
// - Scope: How many users/features are affected?
// - Urgency: Production down vs minor inconvenience
// - Impact: Data loss, security, business impact

const severity = analyzeBugSeverity(bug_description)
// Returns: 'Low' | 'Medium' | 'High' | 'Critical'
// Low: Minor UI issue, localized, no data impact
// Medium: Multiple users affected, degraded functionality
// High: Significant functionality broken, many users affected
// Critical: Production down, data loss risk, security issue

// Angle assignment based on bug type (orchestrator decides, not agent)
const DIAGNOSIS_ANGLE_PRESETS = {
  runtime_error: ['error-handling', 'dataflow', 'state-management', 'edge-cases'],
  performance: ['performance', 'bottlenecks', 'caching', 'data-access'],
  security: ['security', 'auth-patterns', 'dataflow', 'validation'],
  data_corruption: ['data-integrity', 'state-management', 'transactions', 'validation'],
  ui_bug: ['state-management', 'event-handling', 'rendering', 'data-binding'],
  integration: ['api-contracts', 'error-handling', 'timeouts', 'fallbacks']
}

function selectDiagnosisAngles(bugDescription, count) {
  const text = bugDescription.toLowerCase()
  let preset = 'runtime_error' // default

  if (/slow|timeout|performance|lag|hang/.test(text)) preset = 'performance'
  else if (/security|auth|permission|access|token/.test(text)) preset = 'security'
  else if (/corrupt|data|lost|missing|inconsistent/.test(text)) preset = 'data_corruption'
  else if (/ui|display|render|style|click|button/.test(text)) preset = 'ui_bug'
  else if (/api|integration|connect|request|response/.test(text)) preset = 'integration'

  return DIAGNOSIS_ANGLE_PRESETS[preset].slice(0, count)
}

const selectedAngles = selectDiagnosisAngles(bug_description, severity === 'Critical' ? 4 : (severity === 'High' ? 3 : (severity === 'Medium' ? 2 : 1)))

console.log(`
## Diagnosis Plan

Bug Severity: ${severity}
Selected Angles: ${selectedAngles.join(', ')}

Launching ${selectedAngles.length} parallel diagnoses...
`)
```
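
`analyzeBugSeverity` is referenced above but not defined in this document; a minimal keyword-based sketch consistent with the criteria in the comments (the keyword lists are illustrative assumptions):

```javascript
// Illustrative heuristic; a real implementation weighs symptoms, scope, urgency, and impact
function analyzeBugSeverity(bugDescription) {
  const text = bugDescription.toLowerCase()
  if (/production down|data loss|security|breach|outage/.test(text)) return 'Critical'
  if (/crash|broken|fail|cannot|all users|many users/.test(text)) return 'High'
  if (/degraded|slow|intermittent|some users/.test(text)) return 'Medium'
  return 'Low'
}
```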

**Launch Parallel Diagnoses** - Orchestrator assigns an angle to each agent:

```javascript
// Launch agents with pre-assigned diagnosis angles
const diagnosisTasks = selectedAngles.map((angle, index) =>
  Task(
    subagent_type="cli-explore-agent",
    run_in_background=false,
    description=`Diagnose: ${angle}`,
    prompt=`
## Task Objective
Execute **${angle}** diagnosis for bug root cause analysis. Analyze codebase from this specific angle to discover root cause, affected paths, and fix hints.

## Assigned Context
- **Diagnosis Angle**: ${angle}
- **Bug Description**: ${bug_description}
- **Diagnosis Index**: ${index + 1} of ${selectedAngles.length}
- **Output File**: ${sessionFolder}/diagnosis-${angle}.json

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Run: ccw tool exec get_modules_by_depth '{}' (project structure)
2. Run: rg -l "{error_keyword_from_bug}" --type ts (locate relevant files)
3. Execute: cat ~/.claude/workflows/cli-templates/schemas/diagnosis-json-schema.json (get output schema reference)
4. Read: .workflow/project-tech.json (technology stack and architecture context)
5. Read: .workflow/project-guidelines.json (user-defined constraints and conventions)

## Diagnosis Strategy (${angle} focus)

**Step 1: Error Tracing** (Bash)
- rg for error messages, stack traces, log patterns
- git log --since='2 weeks ago' for recent changes
- Trace execution path in affected modules

**Step 2: Root Cause Analysis** (Gemini CLI)
- What code paths lead to this ${angle} issue?
- What edge cases are not handled from ${angle} perspective?
- What recent changes might have introduced this bug?

**Step 3: Write Output**
- Consolidate ${angle} findings into JSON
- Identify ${angle}-specific clarification needs
- Provide fix hints based on ${angle} analysis

## Expected Output

**File**: ${sessionFolder}/diagnosis-${angle}.json

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 3, follow schema exactly

**Required Fields** (all ${angle} focused):
- symptom: Bug symptoms and error messages
- root_cause: Root cause hypothesis from ${angle} perspective
  **IMPORTANT**: Use structured format:
  \`{file: "src/module/file.ts", line_range: "45-60", issue: "Description", confidence: 0.85}\`
- affected_files: Files involved from ${angle} perspective
  **IMPORTANT**: Use object format with relevance scores:
  \`[{path: "src/file.ts", relevance: 0.85, rationale: "Contains ${angle} logic"}]\`
- reproduction_steps: Steps to reproduce the bug
- fix_hints: Suggested fix approaches from ${angle} viewpoint
- dependencies: Dependencies relevant to ${angle} diagnosis
- constraints: ${angle}-specific limitations affecting fix
- clarification_needs: ${angle}-related ambiguities (options array + recommended index)
- _metadata.diagnosis_angle: "${angle}"
- _metadata.diagnosis_index: ${index + 1}

## Success Criteria
- [ ] Schema obtained via cat diagnosis-json-schema.json
- [ ] get_modules_by_depth.sh executed
- [ ] Root cause identified with confidence score
- [ ] At least 3 affected files identified with ${angle} rationale
- [ ] Fix hints are actionable (specific code changes, not generic advice)
- [ ] Reproduction steps are verifiable
- [ ] JSON output follows schema exactly
- [ ] clarification_needs includes options + recommended

## Output
Write: ${sessionFolder}/diagnosis-${angle}.json
Return: 2-3 sentence summary of ${angle} diagnosis findings
`
  )
)

// Execute all diagnosis tasks in parallel
```

**Auto-discover Generated Diagnosis Files**:
```javascript
// After diagnoses complete, auto-discover all diagnosis-*.json files
const diagnosisFiles = bash(`find ${sessionFolder} -name "diagnosis-*.json" -type f`)
  .split('\n')
  .filter(f => f.trim())

// Read metadata to build manifest
const diagnosisManifest = {
  session_id: sessionId,
  bug_description: bug_description,
  timestamp: getUtc8ISOString(),
  severity: severity,
  diagnosis_count: diagnosisFiles.length,
  diagnoses: diagnosisFiles.map(file => {
    const data = JSON.parse(Read(file))
    const filename = path.basename(file)
    return {
      angle: data._metadata.diagnosis_angle,
      file: filename,
      path: file,
      index: data._metadata.diagnosis_index
    }
  })
}

Write(`${sessionFolder}/diagnoses-manifest.json`, JSON.stringify(diagnosisManifest, null, 2))

console.log(`
## Diagnosis Complete

Generated diagnosis files in ${sessionFolder}:
${diagnosisManifest.diagnoses.map(d => `- diagnosis-${d.angle}.json (angle: ${d.angle})`).join('\n')}

Manifest: diagnoses-manifest.json
Angles diagnosed: ${diagnosisManifest.diagnoses.map(d => d.angle).join(', ')}
`)
```

**Output**:
- `${sessionFolder}/diagnosis-{angle1}.json`
- `${sessionFolder}/diagnosis-{angle2}.json`
- ... (1-4 files based on severity)
- `${sessionFolder}/diagnoses-manifest.json`

---
|
||||
### Phase 2: Clarification (Optional, Multi-Round)

**Skip if**: No diagnosis was produced, or `clarification_needs` is empty across all diagnoses

**⚠️ CRITICAL**: The AskUserQuestion tool accepts at most 4 questions per call. **MUST execute multiple rounds** to exhaust all clarification needs - do NOT stop at round 1.

**Aggregate clarification needs from all diagnosis angles**:
```javascript
// Load manifest and all diagnosis files
const manifest = JSON.parse(Read(`${sessionFolder}/diagnoses-manifest.json`))
const diagnoses = manifest.diagnoses.map(diag => ({
  angle: diag.angle,
  data: JSON.parse(Read(diag.path))
}))

// Aggregate clarification needs from all diagnoses
const allClarifications = []
diagnoses.forEach(diag => {
  if (diag.data.clarification_needs?.length > 0) {
    diag.data.clarification_needs.forEach(need => {
      allClarifications.push({
        ...need,
        source_angle: diag.angle
      })
    })
  }
})

// Deduplicate by exact question text (case-insensitive)
function deduplicateClarifications(clarifications) {
  const unique = []
  clarifications.forEach(c => {
    const isDuplicate = unique.some(u =>
      u.question.toLowerCase() === c.question.toLowerCase()
    )
    if (!isDuplicate) unique.push(c)
  })
  return unique
}

const uniqueClarifications = deduplicateClarifications(allClarifications)

// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes) {
  // Auto mode: Skip clarification phase
  console.log(`[--yes] Skipping ${uniqueClarifications.length} clarification questions`)
  console.log(`Proceeding to fix planning with diagnosis results...`)
  // Continue to Phase 3
} else if (uniqueClarifications.length > 0) {
  // Interactive mode: Multi-round clarification
  // ⚠️ MUST execute ALL rounds until uniqueClarifications exhausted
  const BATCH_SIZE = 4
  const totalRounds = Math.ceil(uniqueClarifications.length / BATCH_SIZE)

  for (let i = 0; i < uniqueClarifications.length; i += BATCH_SIZE) {
    const batch = uniqueClarifications.slice(i, i + BATCH_SIZE)
    const currentRound = Math.floor(i / BATCH_SIZE) + 1

    console.log(`### Clarification Round ${currentRound}/${totalRounds}`)

    AskUserQuestion({
      questions: batch.map(need => ({
        question: `[${need.source_angle}] ${need.question}\n\nContext: ${need.context}`,
        header: need.source_angle,
        multiSelect: false,
        options: need.options.map((opt, index) => {
          const isRecommended = need.recommended === index
          return {
            label: isRecommended ? `${opt} ★` : opt,
            description: isRecommended ? `Use ${opt} approach (Recommended)` : `Use ${opt} approach`
          }
        })
      }))
    })

    // Store batch responses in clarificationContext before next round
  }
}
```
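The final comment leaves response collection implicit. One way the loop could accumulate answers into `clarificationContext` (a sketch; it assumes AskUserQuestion returns one answer per question, in order, and uses a hypothetical `buildQuestions` shorthand for the question mapping shown above):

```javascript
// Sketch: accumulate every round's answers for later planning phases.
const clarificationContext = {}
for (let i = 0; i < uniqueClarifications.length; i += BATCH_SIZE) {
  const batch = uniqueClarifications.slice(i, i + BATCH_SIZE)
  const answers = AskUserQuestion({ questions: buildQuestions(batch) }) // assumed return shape
  batch.forEach((need, j) => {
    clarificationContext[`[${need.source_angle}] ${need.question}`] = answers[j]
  })
}
```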

**Output**: `clarificationContext` (in-memory)

---

### Phase 3: Fix Planning

**Planning Strategy Selection** (based on Phase 1 severity):

**IMPORTANT**: Phase 3 is **planning only** - NO code execution. All execution happens in Phase 5 via lite-execute.

**Low/Medium Severity** - Direct planning by Claude:
```javascript
// Step 1: Read schema
const schema = Bash(`cat ~/.claude/workflows/cli-templates/schemas/fix-plan-json-schema.json`)

// Step 2: Generate fix-plan following schema (Claude directly, no agent)
// For Medium complexity: include rationale + verification (optional, but recommended)
const fixPlan = {
  summary: "...",
  root_cause: "...",
  strategy: "immediate_patch|comprehensive_fix|refactor",
  tasks: [...], // Each task: { id, title, scope, ..., depends_on, complexity }
  estimated_time: "...",
  recommended_execution: "Agent",
  severity: severity,
  risk_level: "...",

  // Medium complexity fields (optional for direct planning, auto-filled for Low)
  ...(severity === "Medium" ? {
    design_decisions: [
      {
        decision: "Use immediate_patch strategy for minimal risk",
        rationale: "Keeps changes localized and quick to review",
        tradeoff: "Defers comprehensive refactoring"
      }
    ],
    tasks_with_rationale: {
      // Each task gets rationale if Medium
      task_rationale_example: {
        rationale: {
          chosen_approach: "Direct fix approach",
          alternatives_considered: ["Workaround", "Refactor"],
          decision_factors: ["Minimal impact", "Quick turnaround"],
          tradeoffs: "Doesn't address underlying issue"
        },
        verification: {
          unit_tests: ["test_bug_fix_basic"],
          integration_tests: [],
          manual_checks: ["Reproduce issue", "Verify fix"],
          success_metrics: ["Issue resolved", "No regressions"]
        }
      }
    }
  } : {}),

  _metadata: {
    timestamp: getUtc8ISOString(),
    source: "direct-planning",
    planning_mode: "direct",
    complexity: severity === "Medium" ? "Medium" : "Low"
  }
}

// Step 3: Merge task rationale into tasks array
if (severity === "Medium") {
  fixPlan.tasks = fixPlan.tasks.map(task => ({
    ...task,
    rationale: fixPlan.tasks_with_rationale[task.id]?.rationale || {
      chosen_approach: "Standard fix",
      alternatives_considered: [],
      decision_factors: ["Correctness", "Simplicity"],
      tradeoffs: "None"
    },
    verification: fixPlan.tasks_with_rationale[task.id]?.verification || {
      unit_tests: [`test_${task.id}_basic`],
      integration_tests: [],
      manual_checks: ["Verify fix works"],
      success_metrics: ["Test pass"]
    }
  }))
  delete fixPlan.tasks_with_rationale // Clean up temp field
}

// Step 4: Write fix-plan to session folder
Write(`${sessionFolder}/fix-plan.json`, JSON.stringify(fixPlan, null, 2))

// Step 5: MUST continue to Phase 4 (Confirmation) - DO NOT execute code here
```
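As a concrete reference, one entry in `tasks` might look like this (a minimal sketch with invented values; the authoritative field set comes from `fix-plan-json-schema.json`):

```json
{
  "id": "T1",
  "title": "Fix token validation edge case",
  "scope": "src/auth/",
  "action": "Fix",
  "description": "Reject expired tokens before the signature check",
  "modification_points": [{"file": "src/auth/validate.ts", "line": 42}],
  "implementation": ["Add expiry check", "Update error message"],
  "acceptance": "Expired tokens return 401 in 100% of cases",
  "depends_on": [],
  "complexity": "Low"
}
```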

**High/Critical Severity** - Invoke cli-lite-planning-agent:

```javascript
Task({
  subagent_type: "cli-lite-planning-agent",
  run_in_background: false,
  description: "Generate detailed fix plan",
  prompt: `
Generate fix plan and write fix-plan.json.

## Output Schema Reference
Execute: cat ~/.claude/workflows/cli-templates/schemas/fix-plan-json-schema.json (get schema reference before generating plan)

## Project Context (MANDATORY - Read Both Files)
1. Read: .workflow/project-tech.json (technology stack, architecture, key components)
2. Read: .workflow/project-guidelines.json (user-defined constraints and conventions)

**CRITICAL**: All fix tasks MUST comply with constraints in project-guidelines.json

## Bug Description
${bug_description}

## Multi-Angle Diagnosis Context

${manifest.diagnoses.map(diag => `### Diagnosis: ${diag.angle} (${diag.file})
Path: ${diag.path}

Read this file for detailed ${diag.angle} analysis.`).join('\n\n')}

Total diagnoses: ${manifest.diagnosis_count}
Angles covered: ${manifest.diagnoses.map(d => d.angle).join(', ')}

Manifest: ${sessionFolder}/diagnoses-manifest.json

## User Clarifications
${JSON.stringify(clarificationContext) || "None"}

## Severity Level
${severity}

## Requirements
Generate fix-plan.json with:
- summary: 2-3 sentence overview of the fix
- root_cause: Consolidated root cause from all diagnoses
- strategy: "immediate_patch" | "comprehensive_fix" | "refactor"
- tasks: 1-5 structured fix tasks (**IMPORTANT: group by fix area, NOT by file**)
  - **Task Granularity Principle**: Each task = one complete fix unit
  - title: action verb + target (e.g., "Fix token validation edge case")
  - scope: module path (src/auth/) or feature name
  - action: "Fix" | "Update" | "Refactor" | "Add" | "Delete"
  - description
  - modification_points: ALL files to modify for this fix (group related changes)
  - implementation (2-5 steps covering all modification_points)
  - acceptance: Quantified acceptance criteria
  - depends_on: task IDs this task depends on (use sparingly)

**High/Critical complexity fields per task** (REQUIRED):
- rationale:
  - chosen_approach: Why this fix approach (not alternatives)
  - alternatives_considered: Other approaches evaluated
  - decision_factors: Key factors influencing choice
  - tradeoffs: Known tradeoffs of this approach
- verification:
  - unit_tests: Test names to add/verify
  - integration_tests: Integration test names
  - manual_checks: Manual verification steps
  - success_metrics: Quantified success criteria
- risks:
  - description: Risk description
  - probability: Low|Medium|High
  - impact: Low|Medium|High
  - mitigation: How to mitigate
  - fallback: Fallback if fix fails
- code_skeleton (optional): Key interfaces/functions to implement
  - interfaces: [{name, definition, purpose}]
  - key_functions: [{signature, purpose, returns}]

**Top-level High/Critical fields** (REQUIRED):
- data_flow: How data flows through affected code
  - diagram: "A → B → C" style flow
  - stages: [{stage, input, output, component}]
- design_decisions: Global fix decisions
  - [{decision, rationale, tradeoff}]

- estimated_time, recommended_execution, severity, risk_level
- _metadata:
  - timestamp, source, planning_mode
  - complexity: "High" | "Critical"
  - diagnosis_angles: ${JSON.stringify(manifest.diagnoses.map(d => d.angle))}

## Task Grouping Rules
1. **Group by fix area**: All changes for one fix = one task (even if 2-3 files)
2. **Avoid file-per-task**: Do NOT create separate tasks for each file
3. **Substantial tasks**: Each task should represent 10-45 minutes of work
4. **True dependencies only**: Only use depends_on when Task B cannot start without Task A's output
5. **Prefer parallel**: Most tasks should be independent (no depends_on)

## Execution
1. Read ALL diagnosis files for comprehensive context
2. Execute CLI planning using Gemini (Qwen fallback) with --rule planning-fix-strategy template
3. Synthesize findings from multiple diagnosis angles
4. Generate fix-plan with:
   - For High/Critical: REQUIRED new fields (rationale, verification, risks, code_skeleton, data_flow, design_decisions)
   - Each task MUST have rationale (why this fix), verification (how to verify success), and risks (potential issues)
5. Parse output and structure fix-plan
6. Write JSON: Write('${sessionFolder}/fix-plan.json', jsonContent)
7. Return brief completion summary

## Output Format for CLI
Include these sections in your fix-plan output:
- Summary, Root Cause, Strategy (existing)
- Data Flow: Diagram showing affected code paths
- Design Decisions: Key architectural choices in the fix
- Tasks: Each with rationale (Medium/High), verification (Medium/High), risks (High), code_skeleton (High)
`
})
```

**Output**: `${sessionFolder}/fix-plan.json`

---

### Phase 4: Task Confirmation & Execution Selection

**Step 4.1: Display Fix Plan**
```javascript
const fixPlan = JSON.parse(Read(`${sessionFolder}/fix-plan.json`))

console.log(`
## Fix Plan

**Summary**: ${fixPlan.summary}
**Root Cause**: ${fixPlan.root_cause}
**Strategy**: ${fixPlan.strategy}

**Tasks** (${fixPlan.tasks.length}):
${fixPlan.tasks.map((t, i) => `${i+1}. ${t.title} (${t.scope})`).join('\n')}

**Severity**: ${fixPlan.severity}
**Risk Level**: ${fixPlan.risk_level}
**Estimated Time**: ${fixPlan.estimated_time}
**Recommended**: ${fixPlan.recommended_execution}
`)
```

**Step 4.2: Collect Confirmation**
```javascript
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

let userSelection

if (autoYes) {
  // Auto mode: Use defaults
  console.log(`[--yes] Auto-confirming fix plan:`)
  console.log(`  - Confirmation: Allow`)
  console.log(`  - Execution: Auto`)
  console.log(`  - Review: Skip`)

  userSelection = {
    confirmation: "Allow",
    execution_method: "Auto",
    code_review_tool: "Skip"
  }
} else {
  // Interactive mode: Ask user
  userSelection = AskUserQuestion({
    questions: [
      {
        question: `Confirm fix plan? (${fixPlan.tasks.length} tasks, ${fixPlan.severity} severity)`,
        header: "Confirm",
        multiSelect: false,
        options: [
          { label: "Allow", description: "Proceed as-is" },
          { label: "Modify", description: "Adjust before execution" },
          { label: "Cancel", description: "Abort workflow" }
        ]
      },
      {
        question: "Execution method:",
        header: "Execution",
        multiSelect: false,
        options: [
          { label: "Agent", description: "@code-developer agent" },
          { label: "Codex", description: "codex CLI tool" },
          { label: "Auto", description: `Auto: ${fixPlan.severity === 'Low' ? 'Agent' : 'Codex'}` }
        ]
      },
      {
        question: "Code review after fix?",
        header: "Review",
        multiSelect: false,
        options: [
          { label: "Gemini Review", description: "Gemini CLI" },
          { label: "Agent Review", description: "@code-reviewer" },
          { label: "Skip", description: "No review" }
        ]
      }
    ]
  })
}
```

---

### Phase 5: Handoff to Execution

**CRITICAL**: lite-fix NEVER executes code directly. ALL execution MUST go through lite-execute.

**Step 5.1: Build executionContext**

```javascript
// Load manifest and all diagnosis files
const manifest = JSON.parse(Read(`${sessionFolder}/diagnoses-manifest.json`))
const diagnoses = {}

manifest.diagnoses.forEach(diag => {
  if (file_exists(diag.path)) {
    diagnoses[diag.angle] = JSON.parse(Read(diag.path))
  }
})

const fixPlan = JSON.parse(Read(`${sessionFolder}/fix-plan.json`))

executionContext = {
  mode: "bugfix",
  severity: fixPlan.severity,
  planObject: {
    ...fixPlan,
    // Ensure complexity is set based on severity for new field consumption
    complexity: fixPlan.complexity || (fixPlan.severity === 'Critical' ? 'High' : (fixPlan.severity === 'High' ? 'High' : 'Medium'))
  },
  diagnosisContext: diagnoses,
  diagnosisAngles: manifest.diagnoses.map(d => d.angle),
  diagnosisManifest: manifest,
  clarificationContext: clarificationContext || null,
  executionMethod: userSelection.execution_method,
  codeReviewTool: userSelection.code_review_tool,
  originalUserInput: bug_description,
  session: {
    id: sessionId,
    folder: sessionFolder,
    artifacts: {
      diagnoses: manifest.diagnoses.map(diag => ({
        angle: diag.angle,
        path: diag.path
      })),
      diagnoses_manifest: `${sessionFolder}/diagnoses-manifest.json`,
      fix_plan: `${sessionFolder}/fix-plan.json`
    }
  }
}
```

**Step 5.2: Execute**

```javascript
SlashCommand("/workflow:lite-execute --in-memory --mode bugfix")
```

## Session Folder Structure

```
.workflow/.lite-fix/{bug-slug}-{YYYY-MM-DD-HH-mm-ss}/
|- diagnosis-{angle1}.json    # Diagnosis angle 1
|- diagnosis-{angle2}.json    # Diagnosis angle 2
|- diagnosis-{angle3}.json    # Diagnosis angle 3 (if applicable)
|- diagnosis-{angle4}.json    # Diagnosis angle 4 (if applicable)
|- diagnoses-manifest.json    # Diagnosis index
+- fix-plan.json              # Fix plan
```

**Example**:
```
.workflow/.lite-fix/user-avatar-upload-fails-413-2025-11-25-14-30-25/
|- diagnosis-error-handling.json
|- diagnosis-dataflow.json
|- diagnosis-validation.json
|- diagnoses-manifest.json
+- fix-plan.json
```

## Error Handling

| Error | Resolution |
|-------|------------|
| Diagnosis agent failure | Skip diagnosis, continue with bug description only |
| Planning agent failure | Fallback to direct planning by Claude |
| Clarification timeout | Use diagnosis findings as-is |
| Confirmation timeout | Save context, display resume instructions |
| Modify loop > 3 times | Suggest breaking task or using /workflow:plan |
| Root cause unclear | Extend diagnosis time or use broader angles |
| Too complex for lite-fix | Escalate to /workflow:plan --mode bugfix |

.claude/commands/workflow/lite-lite-lite.md (new file, 465 lines)
@@ -0,0 +1,465 @@
---
name: workflow:lite-lite-lite
description: Ultra-lightweight multi-tool analysis and direct execution. No artifacts for simple tasks; auto-creates planning docs in .workflow/.scratchpad/ for complex tasks. Auto tool selection based on task analysis, user-driven iteration via AskUser.
argument-hint: "[-y|--yes] <task description>"
allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Bash(*), Write(*), mcp__ace-tool__search_context(*), mcp__ccw-tools__write_file(*)
---

## Auto Mode

When `--yes` or `-y`: Skip clarification questions, auto-select tools, execute directly with recommended settings.

# Ultra-Lite Multi-Tool Workflow

## Quick Start

```bash
/workflow:lite-lite-lite "Fix the login bug"
/workflow:lite-lite-lite "Refactor payment module for multi-gateway support"
```

**Core Philosophy**: Minimal friction, maximum velocity. Simple tasks = no artifacts. Complex tasks = lightweight planning doc in `.workflow/.scratchpad/`.

## Overview

**Complexity-aware workflow**: Clarify → Assess Complexity → Select Tools → Multi-Mode Analysis → Decision → Direct Execution

**vs multi-cli-plan**: No IMPL_PLAN.md, plan.json, or synthesis.json; state lives in memory, with a lightweight scratchpad doc only for complex tasks.

## Execution Flow

```
Phase 1: Clarify Requirements → AskUser for missing details
Phase 1.5: Assess Complexity → Determine if planning doc needed
Phase 2: Select Tools (CLI → Mode → Agent) → 3-step selection
Phase 3: Multi-Mode Analysis → Execute with --resume chaining
Phase 4: User Decision → Execute / Refine / Change / Cancel
Phase 5: Direct Execution → No plan files (simple) or scratchpad doc (complex)
```

## Phase 1: Clarify Requirements

```javascript
const taskDescription = $ARGUMENTS

if (taskDescription.length < 20 || isAmbiguous(taskDescription)) {
  AskUserQuestion({
    questions: [{
      question: "Please provide more details: target files/modules, expected behavior, constraints?",
      header: "Details",
      options: [
        { label: "I'll provide more", description: "Add more context" },
        { label: "Continue analysis", description: "Let tools explore autonomously" }
      ],
      multiSelect: false
    }]
  })
}

// Optional: Quick ACE Context for complex tasks
mcp__ace-tool__search_context({
  project_root_path: process.cwd(),
  query: `${taskDescription} implementation patterns`
})
```
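`isAmbiguous` is referenced but never defined here; a minimal heuristic sketch (an assumption, not part of the command spec):

```javascript
// Hypothetical heuristic: very short or pronoun-heavy descriptions count as ambiguous.
function isAmbiguous(text) {
  const vague = /\b(it|this|that|something|stuff)\b/i
  return text.trim().split(/\s+/).length < 4 || vague.test(text)
}
```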

## Phase 1.5: Assess Complexity

| Level | Creates Plan Doc | Trigger Keywords |
|-------|------------------|------------------|
| **simple** | ❌ | (default) |
| **moderate** | ✅ | module, system, service, integration, multiple |
| **complex** | ✅ | refactor, migrate, security, auth, payment, database |

```javascript
// Complexity detection (after ACE query)
const isComplex = /refactor|migrate|security|auth|payment|database/i.test(taskDescription)
const isModerate = /module|system|service|integration|multiple/i.test(taskDescription) || aceContext?.relevant_files?.length > 2

if (isComplex || isModerate) {
  const planPath = `.workflow/.scratchpad/lite3-${taskSlug}-${dateStr}.md`
  // Create planning doc with: Task, Status, Complexity, Analysis Summary, Execution Plan, Progress Log
}
```
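The planning-doc creation is only sketched by the comment above; one plausible shape for it, assuming `taskSlug` and `dateStr` are derived from the task and the current date (both assumptions):

```javascript
// Sketch: seed the scratchpad doc with the sections named in the comment above.
const planDoc = [
  `# lite3: ${taskDescription}`,
  `- Status: planning`,
  `- Complexity: ${isComplex ? 'complex' : 'moderate'}`,
  '', '## Analysis Summary', '',
  '## Execution Plan', '',
  '## Progress Log', ''
].join('\n')
Bash(`mkdir -p .workflow/.scratchpad`)
Write(planPath, planDoc)
```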

## Phase 2: Select Tools

### Tool Definitions

**CLI Tools** (from cli-tools.json):
```javascript
const cliConfig = JSON.parse(Read("~/.claude/cli-tools.json"))
const cliTools = Object.entries(cliConfig.tools)
  .filter(([_, config]) => config.enabled)
  .map(([name, config]) => ({
    name, type: 'cli',
    tags: config.tags || [],
    model: config.primaryModel,
    toolType: config.type // builtin, cli-wrapper, api-endpoint
  }))
```

**Sub Agents**:

| Agent | Strengths | canExecute |
|-------|-----------|------------|
| **code-developer** | Code implementation, test writing | ✅ |
| **Explore** | Fast code exploration, pattern discovery | ❌ |
| **cli-explore-agent** | Dual-source analysis (Bash+CLI) | ❌ |
| **cli-discuss-agent** | Multi-CLI collaboration, cross-verification | ❌ |
| **debug-explore-agent** | Hypothesis-driven debugging | ❌ |
| **context-search-agent** | Multi-layer file discovery, dependency analysis | ❌ |
| **test-fix-agent** | Test execution, failure diagnosis, code fixing | ✅ |
| **universal-executor** | General execution, multi-domain adaptation | ✅ |

**Analysis Modes**:

| Mode | Pattern | Use Case | minCLIs |
|------|---------|----------|---------|
| **Parallel** | `A \|\| B \|\| C → Aggregate` | Fast multi-perspective | 1+ |
| **Sequential** | `A → B(resume) → C(resume)` | Incremental deepening | 2+ |
| **Collaborative** | `A → B → A → B → Synthesize` | Multi-round refinement | 2+ |
| **Debate** | `A(propose) → B(challenge) → A(defend)` | Adversarial validation | 2 |
| **Challenge** | `A(analyze) → B(challenge)` | Find flaws and risks | 2 |

### Three-Step Selection Flow

```javascript
// Step 1: Select CLIs (multiSelect)
AskUserQuestion({
  questions: [{
    question: "Select CLI tools for analysis (1-3 for collaboration modes)",
    header: "CLI Tools",
    options: cliTools.map(cli => ({
      label: cli.name,
      description: cli.tags.length > 0 ? cli.tags.join(', ') : cli.model || 'general'
    })),
    multiSelect: true
  }]
})

// Step 2: Select Mode (filtered by CLI count)
const availableModes = analysisModes.filter(m => selectedCLIs.length >= m.minCLIs)
AskUserQuestion({
  questions: [{
    question: "Select analysis mode",
    header: "Mode",
    options: availableModes.map(m => ({
      label: m.label,
      description: `${m.description} [${m.pattern}]`
    })),
    multiSelect: false
  }]
})

// Step 3: Select Agent for execution
AskUserQuestion({
  questions: [{
    question: "Select Sub Agent for execution",
    header: "Agent",
    options: agents.map(a => ({ label: a.name, description: a.strength })),
    multiSelect: false
  }]
})

// Confirm selection
AskUserQuestion({
  questions: [{
    question: "Confirm selection?",
    header: "Confirm",
    options: [
      { label: "Confirm and continue", description: `${selectedMode.label} with ${selectedCLIs.length} CLIs` },
      { label: "Re-select CLIs", description: "Choose different CLI tools" },
      { label: "Re-select Mode", description: "Choose different analysis mode" },
      { label: "Re-select Agent", description: "Choose different Sub Agent" }
    ],
    multiSelect: false
  }]
})
```

## Phase 3: Multi-Mode Analysis

### Universal CLI Prompt Template

```javascript
// Unified prompt builder - used by all modes
function buildPrompt({ purpose, tasks, expected, rules, taskDescription }) {
  return `
PURPOSE: ${purpose}: ${taskDescription}
TASK: ${tasks.map(t => `• ${t}`).join(' ')}
MODE: analysis
CONTEXT: @**/*
EXPECTED: ${expected}
CONSTRAINTS: ${rules}
`
}

// Execute CLI with prompt
function execCLI(cli, prompt, options = {}) {
  const { resume, background = false } = options
  const resumeFlag = resume ? `--resume ${resume}` : ''
  return Bash({
    command: `ccw cli -p "${prompt}" --tool ${cli.name} --mode analysis ${resumeFlag}`,
    run_in_background: background
  })
}
```

### Prompt Presets by Role

| Role | PURPOSE | TASKS | EXPECTED | RULES |
|------|---------|-------|----------|-------|
| **initial** | Initial analysis | Identify files, Analyze approach, List changes | Root cause, files, changes, risks | Focus on actionable insights |
| **extend** | Build on previous | Review previous, Extend, Add insights | Extended analysis building on findings | Build incrementally, avoid repetition |
| **synthesize** | Refine and synthesize | Review, Identify gaps, Synthesize | Refined synthesis with new perspectives | Add value not repetition |
| **propose** | Propose comprehensive analysis | Analyze thoroughly, Propose solution, State assumptions | Well-reasoned proposal with trade-offs | Be clear about assumptions |
| **challenge** | Challenge and stress-test | Identify weaknesses, Question assumptions, Suggest alternatives | Critique with counter-arguments | Be adversarial but constructive |
| **defend** | Respond to challenges | Address challenges, Defend valid aspects, Propose refined solution | Refined proposal incorporating feedback | Be open to criticism, synthesize |
| **criticize** | Find flaws ruthlessly | Find logical flaws, Identify edge cases, Rate criticisms | Critique with severity: [CRITICAL]/[HIGH]/[MEDIUM]/[LOW] | Be ruthlessly critical |

```javascript
const PROMPTS = {
  initial: { purpose: 'Initial analysis', tasks: ['Identify affected files', 'Analyze implementation approach', 'List specific changes'], expected: 'Root cause, files to modify, key changes, risks', rules: 'Focus on actionable insights' },
  extend: { purpose: 'Build on previous analysis', tasks: ['Review previous findings', 'Extend analysis', 'Add new insights'], expected: 'Extended analysis building on previous', rules: 'Build incrementally, avoid repetition' },
  synthesize: { purpose: 'Refine and synthesize', tasks: ['Review previous', 'Identify gaps', 'Add insights', 'Synthesize findings'], expected: 'Refined synthesis with new perspectives', rules: 'Build collaboratively, add value' },
  propose: { purpose: 'Propose comprehensive analysis', tasks: ['Analyze thoroughly', 'Propose solution', 'State assumptions clearly'], expected: 'Well-reasoned proposal with trade-offs', rules: 'Be clear about assumptions' },
  challenge: { purpose: 'Challenge and stress-test', tasks: ['Identify weaknesses', 'Question assumptions', 'Suggest alternatives', 'Highlight overlooked risks'], expected: 'Constructive critique with counter-arguments', rules: 'Be adversarial but constructive' },
  defend: { purpose: 'Respond to challenges', tasks: ['Address each challenge', 'Defend valid aspects', 'Acknowledge valid criticisms', 'Propose refined solution'], expected: 'Refined proposal incorporating alternatives', rules: 'Be open to criticism, synthesize best ideas' },
  criticize: { purpose: 'Stress-test and find weaknesses', tasks: ['Find logical flaws', 'Identify missed edge cases', 'Propose alternatives', 'Rate criticisms (High/Medium/Low)'], expected: 'Detailed critique with severity ratings', rules: 'Be ruthlessly critical, find every flaw' }
}
```

### Mode Implementations

```javascript
// Parallel: All CLIs run simultaneously
async function executeParallel(clis, task) {
  return await Promise.all(clis.map(cli =>
    execCLI(cli, buildPrompt({ ...PROMPTS.initial, taskDescription: task }), { background: true })
  ))
}

// Sequential: Each CLI builds on previous via --resume
async function executeSequential(clis, task) {
  const results = []
  let prevId = null
  for (const cli of clis) {
    const preset = prevId ? PROMPTS.extend : PROMPTS.initial
    const result = await execCLI(cli, buildPrompt({ ...preset, taskDescription: task }), { resume: prevId })
    results.push(result)
    prevId = extractSessionId(result)
  }
  return results
}

// Collaborative: Multi-round synthesis
async function executeCollaborative(clis, task, rounds = 2) {
  const results = []
  let prevId = null
  for (let r = 0; r < rounds; r++) {
    for (const cli of clis) {
      const preset = !prevId ? PROMPTS.initial : PROMPTS.synthesize
      const result = await execCLI(cli, buildPrompt({ ...preset, taskDescription: task }), { resume: prevId })
      results.push({ cli: cli.name, round: r, result })
      prevId = extractSessionId(result)
    }
  }
  return results
}

// Debate: Propose → Challenge → Defend
async function executeDebate(clis, task) {
  const [cliA, cliB] = clis
  const results = []

  const propose = await execCLI(cliA, buildPrompt({ ...PROMPTS.propose, taskDescription: task }))
  results.push({ phase: 'propose', cli: cliA.name, result: propose })

  const challenge = await execCLI(cliB, buildPrompt({ ...PROMPTS.challenge, taskDescription: task }), { resume: extractSessionId(propose) })
  results.push({ phase: 'challenge', cli: cliB.name, result: challenge })

  const defend = await execCLI(cliA, buildPrompt({ ...PROMPTS.defend, taskDescription: task }), { resume: extractSessionId(challenge) })
  results.push({ phase: 'defend', cli: cliA.name, result: defend })

  return results
}

// Challenge: Analyze → Criticize
async function executeChallenge(clis, task) {
  const [cliA, cliB] = clis
  const results = []

  const analyze = await execCLI(cliA, buildPrompt({ ...PROMPTS.initial, taskDescription: task }))
  results.push({ phase: 'analyze', cli: cliA.name, result: analyze })

  const criticize = await execCLI(cliB, buildPrompt({ ...PROMPTS.criticize, taskDescription: task }), { resume: extractSessionId(analyze) })
  results.push({ phase: 'challenge', cli: cliB.name, result: criticize })

  return results
}
```
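`extractSessionId` is assumed rather than defined; a minimal sketch, on the assumption that `ccw cli` prints a resumable session id somewhere in its output:

```javascript
// Hypothetical helper: pull a session id out of CLI output for --resume chaining.
function extractSessionId(result) {
  const match = String(result).match(/session(?:\s*id)?[:=]\s*([\w-]+)/i)
  return match ? match[1] : null
}
```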

### Mode Router & Result Aggregation

```javascript
async function executeAnalysis(mode, clis, taskDescription) {
  switch (mode.name) {
    case 'parallel': return await executeParallel(clis, taskDescription)
    case 'sequential': return await executeSequential(clis, taskDescription)
    case 'collaborative': return await executeCollaborative(clis, taskDescription)
    case 'debate': return await executeDebate(clis, taskDescription)
    case 'challenge': return await executeChallenge(clis, taskDescription)
  }
}

function aggregateResults(mode, results) {
  const base = { mode: mode.name, pattern: mode.pattern, tools_used: results.map(r => r.cli || 'unknown') }

  switch (mode.name) {
    case 'parallel':
      return { ...base, findings: results.map(parseOutput), consensus: findCommonPoints(results), divergences: findDifferences(results) }
    case 'sequential':
      return { ...base, evolution: results.map((r, i) => ({ step: i + 1, analysis: parseOutput(r) })), finalAnalysis: parseOutput(results.at(-1)) }
    case 'collaborative':
      return { ...base, rounds: groupByRound(results), synthesis: extractSynthesis(results.at(-1)) }
    case 'debate':
      return { ...base, proposal: parseOutput(results.find(r => r.phase === 'propose')?.result),
        challenges: parseOutput(results.find(r => r.phase === 'challenge')?.result),
        resolution: parseOutput(results.find(r => r.phase === 'defend')?.result), confidence: calculateDebateConfidence(results) }
    case 'challenge':
      return { ...base, originalAnalysis: parseOutput(results.find(r => r.phase === 'analyze')?.result),
        critiques: parseCritiques(results.find(r => r.phase === 'challenge')?.result), riskScore: calculateRiskScore(results) }
  }
}

// If planPath exists: update Analysis Summary & Execution Plan sections
```
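The aggregation helpers (`parseOutput`, `findCommonPoints`, `findDifferences`, and the scoring functions) are left abstract. Minimal sketches for the first two, assuming CLI output is markdown with a leading summary line and `-` bullet points (an assumption about output shape, not a spec):

```javascript
// Sketch: summary = first non-empty line; points = markdown bullets.
function parseOutput(result) {
  const lines = String(result).split('\n')
  const points = lines.filter(l => l.trim().startsWith('- ')).map(l => l.replace(/^\s*-\s*/, ''))
  return { summary: lines.find(l => l.trim()) || '', points }
}

// Sketch: consensus = bullets present in every tool's output.
function findCommonPoints(results) {
  const sets = results.map(r => new Set(parseOutput(r).points))
  if (sets.length === 0) return []
  return [...sets[0]].filter(p => sets.every(s => s.has(p)))
}
```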

## Phase 4: User Decision

```javascript
function presentSummary(analysis) {
  console.log(`## Analysis Result\n**Mode**: ${analysis.mode} (${analysis.pattern})\n**Tools**: ${analysis.tools_used.join(' → ')}`)

  switch (analysis.mode) {
    case 'parallel':
      console.log(`### Consensus\n${analysis.consensus.map(c => `- ${c}`).join('\n')}\n### Divergences\n${analysis.divergences.map(d => `- ${d}`).join('\n')}`)
      break
    case 'sequential':
      console.log(`### Evolution\n${analysis.evolution.map(e => `**Step ${e.step}**: ${e.analysis.summary}`).join('\n')}\n### Final\n${analysis.finalAnalysis.summary}`)
      break
    case 'collaborative':
      console.log(`### Rounds\n${Object.entries(analysis.rounds).map(([r, a]) => `**Round ${r}**: ${a.map(x => x.cli).join(' + ')}`).join('\n')}\n### Synthesis\n${analysis.synthesis}`)
      break
    case 'debate':
      console.log(`### Debate\n**Proposal**: ${analysis.proposal.summary}\n**Challenges**: ${analysis.challenges.points?.length || 0} points\n**Resolution**: ${analysis.resolution.summary}\n**Confidence**: ${analysis.confidence}%`)
      break
    case 'challenge':
      console.log(`### Challenge\n**Original**: ${analysis.originalAnalysis.summary}\n**Critiques**: ${analysis.critiques.length} issues\n${analysis.critiques.map(c => `- [${c.severity}] ${c.description}`).join('\n')}\n**Risk Score**: ${analysis.riskScore}/100`)
      break
  }
}

AskUserQuestion({
  questions: [{
    question: "How to proceed?",
    header: "Next Step",
    options: [
      { label: "Execute directly", description: "Implement immediately" },
      { label: "Refine analysis", description: "Add constraints, re-analyze" },
      { label: "Change tools", description: "Different tool combination" },
      { label: "Cancel", description: "End workflow" }
    ],
    multiSelect: false
  }]
})
// If planPath exists: record decision to Decisions Made table
// Routing: Execute → Phase 5 | Refine → Phase 3 | Change → Phase 2 | Cancel → End
```

## Phase 5: Direct Execution

```javascript
// Simple tasks: No artifacts | Complex tasks: Update scratchpad doc
const executionAgents = agents.filter(a => a.canExecute)
const executionTool = selectedAgent.canExecute ? selectedAgent : selectedCLIs[0]

if (executionTool.type === 'agent') {
  Task({
    subagent_type: executionTool.name,
    run_in_background: false,
    description: `Execute: ${taskDescription.slice(0, 30)}`,
    prompt: `## Task\n${taskDescription}\n\n## Analysis Results\n${JSON.stringify(aggregatedAnalysis, null, 2)}\n\n## Instructions\n1. Apply changes to identified files\n2. Follow recommended approach\n3. Handle identified risks\n4. Verify changes work correctly`
  })
} else {
  Bash({
    command: `ccw cli -p "
PURPOSE: Implement solution: ${taskDescription}
TASK: ${extractedTasks.join(' • ')}
MODE: write
CONTEXT: @${affectedFiles.join(' @')}
EXPECTED: Working implementation with all changes applied
CONSTRAINTS: Follow existing patterns
" --tool ${executionTool.name} --mode write`,
    run_in_background: false
  })
}
// If planPath exists: update Status to completed/failed, append to Progress Log
```

## TodoWrite Structure

```javascript
TodoWrite({ todos: [
  { content: "Phase 1: Clarify requirements", status: "in_progress", activeForm: "Clarifying requirements" },
  { content: "Phase 1.5: Assess complexity", status: "pending", activeForm: "Assessing complexity" },
  { content: "Phase 2: Select tools", status: "pending", activeForm: "Selecting tools" },
  { content: "Phase 3: Multi-mode analysis", status: "pending", activeForm: "Running analysis" },
  { content: "Phase 4: User decision", status: "pending", activeForm: "Awaiting decision" },
  { content: "Phase 5: Direct execution", status: "pending", activeForm: "Executing" }
]})
```

## Iteration Patterns

| Pattern | Flow |
|---------|------|
| **Direct** | Phase 1 → 2 → 3 → 4(execute) → 5 |
| **Refinement** | Phase 3 → 4(refine) → 3 → 4 → 5 |
| **Tool Adjust** | Phase 2(adjust) → 3 → 4 → 5 |

## Error Handling

| Error | Resolution |
|-------|------------|
| CLI timeout | Retry with secondary model |
| No enabled tools | Ask user to enable tools in cli-tools.json |
| Task unclear | Default to first CLI + code-developer |
| Ambiguous task | Force clarification via AskUser |
| Execution fails | Present error, ask user for direction |
| Plan doc write fails | Continue without doc (degrade to zero-artifact mode) |
| Scratchpad dir missing | Auto-create `.workflow/.scratchpad/` |

## Comparison with multi-cli-plan

| Aspect | lite-lite-lite | multi-cli-plan |
|--------|----------------|----------------|
| **Artifacts** | Conditional (scratchpad doc for complex tasks) | Always (IMPL_PLAN.md, plan.json, synthesis.json) |
| **Session** | Stateless (--resume chaining) | Persistent session folder |
| **Tool Selection** | 3-step (CLI → Mode → Agent) | Config-driven fixed tools |
| **Analysis Modes** | 5 modes with --resume | Fixed synthesis rounds |
| **Complexity** | Auto-detected (simple/moderate/complex) | Assumed complex |
| **Best For** | Quick analysis, simple-to-moderate tasks | Complex multi-step implementations |

## Post-Completion Expansion

After completion, ask the user whether to expand the result into follow-up issues (test/enhance/refactor/doc); for each selected dimension, invoke `/issue:new "{summary} - {dimension}"`.

## Related Commands

```bash
/workflow:multi-cli-plan "complex task"   # Full planning workflow
/workflow:lite-plan "task"                # Single CLI planning
/workflow:lite-execute --in-memory        # Direct execution
```

(one file's diff suppressed: too large)

.claude/commands/workflow/multi-cli-plan.md (new file, 572 lines)
@@ -0,0 +1,572 @@
---
name: workflow:multi-cli-plan
description: Multi-CLI collaborative planning workflow with ACE context gathering and iterative cross-verification. Uses cli-discuss-agent for Gemini+Codex+Claude analysis to converge on optimal execution plan.
argument-hint: "[-y|--yes] <task description> [--max-rounds=3] [--tools=gemini,codex] [--mode=parallel|serial]"
allowed-tools: TodoWrite(*), Task(*), AskUserQuestion(*), Read(*), Bash(*), Write(*), mcp__ace-tool__search_context(*)
---

## Auto Mode

When `--yes` or `-y`: Auto-approve plan, use recommended solution and execution method (Agent, Skip review).

# Multi-CLI Collaborative Planning Command

## Quick Start

```bash
# Basic usage
/workflow:multi-cli-plan "Implement user authentication"

# With options
/workflow:multi-cli-plan "Add dark mode support" --max-rounds=3
/workflow:multi-cli-plan "Refactor payment module" --tools=gemini,codex,claude
/workflow:multi-cli-plan "Fix memory leak" --mode=serial
```

**Context Source**: ACE semantic search + Multi-CLI analysis
**Output Directory**: `.workflow/.multi-cli-plan/{session-id}/`
**Default Max Rounds**: 3 (convergence may complete earlier)
**CLI Tools**: @cli-discuss-agent (analysis), @cli-lite-planning-agent (plan generation)
**Execution**: Hands off automatically to `/workflow:lite-execute --in-memory` after plan approval

## What & Why

### Core Concept

Multi-CLI collaborative planning with **three-phase architecture**: ACE context gathering → Iterative multi-CLI discussion → Plan generation. Orchestrator delegates analysis to agents, only handles user decisions and session management.

**Process**:
- **Phase 1**: ACE semantic search gathers codebase context
- **Phase 2**: cli-discuss-agent orchestrates Gemini/Codex/Claude for cross-verified analysis
- **Phase 3-5**: User decision → Plan generation → Execution handoff

**vs Single-CLI Planning**:
- **Single**: One model perspective, potential blind spots
- **Multi-CLI**: Cross-verification catches inconsistencies, builds consensus on solutions

### Value Proposition

1. **Multi-Perspective Analysis**: Gemini + Codex + Claude analyze from different angles
2. **Cross-Verification**: Identify agreements/disagreements, build confidence
3. **User-Driven Decisions**: Every round ends with user decision point
4. **Iterative Convergence**: Progressive refinement until consensus reached

### Orchestrator Boundary (CRITICAL)

- **ONLY command** for multi-CLI collaborative planning
- Manages: Session state, user decisions, agent delegation, phase transitions
- Delegates: CLI execution to @cli-discuss-agent, plan generation to @cli-lite-planning-agent

### Execution Flow

```
Phase 1: Context Gathering
└─ ACE semantic search, extract keywords, build context package

Phase 2: Multi-CLI Discussion (Iterative, via @cli-discuss-agent)
├─ Round N: Agent executes Gemini + Codex + Claude
├─ Cross-verify findings, synthesize solutions
├─ Write synthesis.json to rounds/{N}/
└─ Loop until convergence or max rounds

Phase 3: Present Options
└─ Display solutions with trade-offs from agent output

Phase 4: User Decision
├─ Select solution approach
├─ Select execution method (Agent/Codex/Auto)
├─ Select code review tool (Skip/Gemini/Codex/Agent)
└─ Route:
   ├─ Approve → Phase 5
   ├─ Need More Analysis → Return to Phase 2
   └─ Cancel → Save session

Phase 5: Plan Generation & Execution Handoff
├─ Generate plan.json (via @cli-lite-planning-agent)
├─ Build executionContext with user selections
└─ Hand off to /workflow:lite-execute --in-memory
```

### Agent Roles

| Agent | Responsibility |
|-------|---------------|
| **Orchestrator** | Session management, ACE context, user decisions, phase transitions, executionContext assembly |
| **@cli-discuss-agent** | Multi-CLI execution (Gemini/Codex/Claude), cross-verification, solution synthesis, synthesis.json output |
| **@cli-lite-planning-agent** | Task decomposition, plan.json generation following schema |

## Core Responsibilities

### Phase 1: Context Gathering

**Session Initialization**:
```javascript
const sessionId = `MCP-${taskSlug}-${date}`
const sessionFolder = `.workflow/.multi-cli-plan/${sessionId}`
Bash(`mkdir -p ${sessionFolder}/rounds`)
```

**ACE Context Queries**:
```javascript
const aceQueries = [
  `Project architecture related to ${keywords}`,
  `Existing implementations of ${keywords[0]}`,
  `Code patterns for ${keywords} features`,
  `Integration points for ${keywords[0]}`
]
// Execute via mcp__ace-tool__search_context
```
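`keywords` is assumed to be extracted from the task description before these queries run; a minimal sketch (the stopword list is illustrative, not canonical):

```javascript
// Sketch: crude keyword extraction from the task description.
const STOPWORDS = new Set(['the', 'a', 'an', 'for', 'to', 'of', 'and', 'in', 'with'])
const keywords = taskDescription.toLowerCase().split(/\W+/)
  .filter(w => w.length > 2 && !STOPWORDS.has(w))
```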

**Context Package** (passed to agent):
- `relevant_files[]` - Files identified by ACE
- `detected_patterns[]` - Code patterns found
- `architecture_insights` - Structure understanding

### Phase 2: Agent Delegation

**Core Principle**: Orchestrator only delegates and reads output - NO direct CLI execution.

**Agent Invocation**:
```javascript
Task({
  subagent_type: "cli-discuss-agent",
  run_in_background: false,
  description: `Discussion round ${currentRound}`,
  prompt: `
## Input Context
- task_description: ${taskDescription}
- round_number: ${currentRound}
- session: { id: "${sessionId}", folder: "${sessionFolder}" }
- ace_context: ${JSON.stringify(contextPackage)}
- previous_rounds: ${JSON.stringify(analysisResults)}
- user_feedback: ${userFeedback || 'None'}
- cli_config: { tools: ["gemini", "codex"], mode: "parallel", fallback_chain: ["gemini", "codex", "claude"] }

## Execution Process
1. Parse input context (handle JSON strings)
2. Check if ACE supplementary search needed
3. Build CLI prompts with context
4. Execute CLIs (parallel or serial per cli_config.mode)
5. Parse CLI outputs, handle failures with fallback
6. Perform cross-verification between CLI results
7. Synthesize solutions, calculate scores
8. Calculate convergence, generate clarification questions
9. Write synthesis.json

## Output
Write: ${sessionFolder}/rounds/${currentRound}/synthesis.json

## Completion Checklist
- [ ] All configured CLI tools executed (or fallback triggered)
- [ ] Cross-verification completed with agreements/disagreements
- [ ] 2-3 solutions generated with file:line references
- [ ] Convergence score calculated (0.0-1.0)
- [ ] synthesis.json written with all Primary Fields
`
})
```

**Read Agent Output**:
```javascript
const synthesis = JSON.parse(Read(`${sessionFolder}/rounds/${round}/synthesis.json`))
// Access top-level fields: solutions, convergence, cross_verification, clarification_questions
```

**Convergence Decision**:
```javascript
if (synthesis.convergence.recommendation === 'converged') {
  // Proceed to Phase 3
} else if (synthesis.convergence.recommendation === 'user_input_needed') {
  // Collect user feedback, return to Phase 2
} else {
  // Continue to next round if new_insights && round < maxRounds
}
```
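Put together, the round loop the orchestrator runs might look roughly like this (a sketch under the assumptions above; `runDiscussionRound` and `collectUserFeedback` are hypothetical stand-ins for the Task invocation and the feedback prompt):

```javascript
// Sketch: tie the convergence decision to the round counter.
let currentRound = 1
let userFeedback = null
while (currentRound <= maxRounds) {
  runDiscussionRound(currentRound, userFeedback) // Task(cli-discuss-agent) as shown above
  const synthesis = JSON.parse(Read(`${sessionFolder}/rounds/${currentRound}/synthesis.json`))
  const rec = synthesis.convergence.recommendation
  if (rec === 'converged') break
  if (rec === 'user_input_needed') userFeedback = collectUserFeedback(synthesis)
  if (rec === 'continue' && !synthesis.convergence.new_insights) break
  currentRound++
}
```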

### Phase 3: Present Options

**Display from Agent Output** (no processing):
```javascript
console.log(`
## Solution Options

${synthesis.solutions.map((s, i) => `
**Option ${i+1}: ${s.name}**
Source: ${s.source_cli.join(' + ')}
Effort: ${s.effort} | Risk: ${s.risk}

Pros: ${s.pros.join(', ')}
Cons: ${s.cons.join(', ')}

Files: ${s.affected_files.slice(0,3).map(f => `${f.file}:${f.line}`).join(', ')}
`).join('\n')}

## Cross-Verification
Agreements: ${synthesis.cross_verification.agreements.length}
Disagreements: ${synthesis.cross_verification.disagreements.length}
`)
```

### Phase 4: User Decision

**Decision Options**:
```javascript
AskUserQuestion({
  questions: [
    {
      question: "Which solution approach?",
      header: "Solution",
      multiSelect: false,
      options: solutions.map((s, i) => ({
        label: `Option ${i+1}: ${s.name}`,
        description: `${s.effort} effort, ${s.risk} risk`
      })).concat([
        { label: "Need More Analysis", description: "Return to Phase 2" }
      ])
    },
    {
      question: "Execution method:",
      header: "Execution",
      multiSelect: false,
      options: [
        { label: "Agent", description: "@code-developer agent" },
        { label: "Codex", description: "codex CLI tool" },
        { label: "Auto", description: "Auto-select based on complexity" }
      ]
    },
    {
      question: "Code review after execution?",
      header: "Review",
      multiSelect: false,
      options: [
        { label: "Skip", description: "No review" },
        { label: "Gemini Review", description: "Gemini CLI tool" },
        { label: "Codex Review", description: "codex review --uncommitted" },
        { label: "Agent Review", description: "Current agent review" }
      ]
    }
  ]
})
```

**Routing**:
- Approve + execution method → Phase 5
- Need More Analysis → Phase 2 with feedback
- Cancel → Save session for resumption

### Phase 5: Plan Generation & Execution Handoff

**Step 1: Build Context-Package** (Orchestrator responsibility):
```javascript
// Extract key information from user decision and synthesis
const contextPackage = {
  // Core solution details
  solution: {
    name: selectedSolution.name,
    source_cli: selectedSolution.source_cli,
    feasibility: selectedSolution.feasibility,
    effort: selectedSolution.effort,
    risk: selectedSolution.risk,
    summary: selectedSolution.summary
  },
  // Implementation plan (tasks, flow, milestones)
  implementation_plan: selectedSolution.implementation_plan,
  // Dependencies
  dependencies: selectedSolution.dependencies || { internal: [], external: [] },
  // Technical concerns
  technical_concerns: selectedSolution.technical_concerns || [],
  // Consensus from cross-verification
  consensus: {
    agreements: synthesis.cross_verification.agreements,
    resolved_conflicts: synthesis.cross_verification.resolution
  },
  // User constraints (from Phase 4 feedback)
  constraints: userConstraints || [],
  // Task context
  task_description: taskDescription,
  session_id: sessionId
}

// Write context-package for traceability
Write(`${sessionFolder}/context-package.json`, JSON.stringify(contextPackage, null, 2))
```

**Context-Package Schema**:

| Field | Type | Description |
|-------|------|-------------|
| `solution` | object | User-selected solution from synthesis |
| `solution.name` | string | Solution identifier |
| `solution.feasibility` | number | Viability score (0-1) |
| `solution.summary` | string | Brief analysis summary |
| `implementation_plan` | object | Task breakdown with flow and dependencies |
| `implementation_plan.approach` | string | High-level technical strategy |
| `implementation_plan.tasks[]` | array | Discrete tasks with id, name, depends_on, files |
| `implementation_plan.execution_flow` | string | Task sequence (e.g., "T1 → T2 → T3") |
| `implementation_plan.milestones` | string[] | Key checkpoints |
| `dependencies` | object | Module and package dependencies |
| `technical_concerns` | string[] | Risks and blockers |
| `consensus` | object | Cross-verified agreements from multi-CLI |
| `constraints` | string[] | User-specified constraints from Phase 4 |

```json
{
  "solution": {
    "name": "Strategy Pattern Refactoring",
    "source_cli": ["gemini", "codex"],
    "feasibility": 0.88,
    "effort": "medium",
    "risk": "low",
    "summary": "Extract payment gateway interface, implement strategy pattern for multi-gateway support"
  },
  "implementation_plan": {
    "approach": "Define interface → Create concrete strategies → Implement factory → Migrate existing code",
    "tasks": [
      {"id": "T1", "name": "Define PaymentGateway interface", "depends_on": [], "files": [{"file": "src/types/payment.ts", "line": 1, "action": "create"}], "key_point": "Include all existing Stripe methods"},
      {"id": "T2", "name": "Implement StripeGateway", "depends_on": ["T1"], "files": [{"file": "src/payment/stripe.ts", "line": 1, "action": "create"}], "key_point": "Wrap existing logic"},
      {"id": "T3", "name": "Create GatewayFactory", "depends_on": ["T1"], "files": [{"file": "src/payment/factory.ts", "line": 1, "action": "create"}], "key_point": null},
      {"id": "T4", "name": "Migrate processor to use factory", "depends_on": ["T2", "T3"], "files": [{"file": "src/payment/processor.ts", "line": 45, "action": "modify"}], "key_point": "Backward compatible"}
    ],
    "execution_flow": "T1 → (T2 | T3) → T4",
    "milestones": ["Interface defined", "Gateway implementations complete", "Migration done"]
  },
  "dependencies": {
    "internal": ["@/lib/payment-gateway", "@/types/payment"],
    "external": ["stripe@^14.0.0"]
  },
  "technical_concerns": ["Existing tests must pass", "No breaking API changes"],
  "consensus": {
    "agreements": ["Use strategy pattern", "Keep existing API"],
    "resolved_conflicts": "Factory over DI for simpler integration"
  },
  "constraints": ["backward compatible", "no breaking changes to PaymentResult type"],
  "task_description": "Refactor payment processing for multi-gateway support",
  "session_id": "MCP-payment-refactor-2026-01-14"
}
```

**Step 2: Invoke Planning Agent**:
```javascript
Task({
  subagent_type: "cli-lite-planning-agent",
  run_in_background: false,
  description: "Generate implementation plan",
  prompt: `
## Schema Reference
Execute: cat ~/.claude/workflows/cli-templates/schemas/plan-json-schema.json

## Context-Package (from orchestrator)
${JSON.stringify(contextPackage, null, 2)}

## Execution Process
1. Read plan-json-schema.json for output structure
2. Read project-tech.json and project-guidelines.json
3. Parse context-package fields:
   - solution: name, feasibility, summary
   - implementation_plan: tasks[], execution_flow, milestones
   - dependencies: internal[], external[]
   - technical_concerns: risks/blockers
   - consensus: agreements, resolved_conflicts
   - constraints: user requirements
4. Use implementation_plan.tasks[] as task foundation
5. Preserve task dependencies (depends_on) and execution_flow
6. Expand tasks with detailed acceptance criteria
7. Generate plan.json following schema exactly

## Output
- ${sessionFolder}/plan.json

## Completion Checklist
- [ ] plan.json preserves task dependencies from implementation_plan
- [ ] Task execution order follows execution_flow
- [ ] Key_points reflected in task descriptions
- [ ] User constraints applied to implementation
- [ ] Acceptance criteria are testable
- [ ] Schema fields match plan-json-schema.json exactly
`
})
```
|
||||
|
||||
**Step 3: Build executionContext**:
|
||||
```javascript
|
||||
// After plan.json is generated by cli-lite-planning-agent
|
||||
const plan = JSON.parse(Read(`${sessionFolder}/plan.json`))
|
||||
|
||||
// Build executionContext (same structure as lite-plan)
|
||||
executionContext = {
|
||||
planObject: plan,
|
||||
explorationsContext: null, // Multi-CLI doesn't use exploration files
|
||||
explorationAngles: [], // No exploration angles
|
||||
explorationManifest: null, // No manifest
|
||||
clarificationContext: null, // Store user feedback from Phase 2 if exists
|
||||
executionMethod: userSelection.execution_method, // From Phase 4
|
||||
codeReviewTool: userSelection.code_review_tool, // From Phase 4
|
||||
originalUserInput: taskDescription,
|
||||
|
||||
// Optional: Task-level executor assignments
|
||||
executorAssignments: null, // Could be enhanced in future
|
||||
|
||||
session: {
|
||||
id: sessionId,
|
||||
folder: sessionFolder,
|
||||
artifacts: {
|
||||
explorations: [], // No explorations in multi-CLI workflow
|
||||
explorations_manifest: null,
|
||||
plan: `${sessionFolder}/plan.json`,
|
||||
synthesis_rounds: Array.from({length: currentRound}, (_, i) =>
|
||||
`${sessionFolder}/rounds/${i+1}/synthesis.json`
|
||||
),
|
||||
context_package: `${sessionFolder}/context-package.json`
|
||||
}
|
||||
}
|
||||
}
|
||||
```
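
Before handing off, it is worth a quick sanity check that the generated plan preserved the dependency graph from the context-package (the first item on the agent's completion checklist). A minimal sketch, assuming `plan.tasks[]` entries carry `id` and `depends_on` as in the schemas shown above:

```javascript
// Verify every depends_on reference points at a real task and no task depends on itself
const taskIds = new Set(plan.tasks.map(t => t.id))
const badRefs = plan.tasks.flatMap(t =>
  (t.depends_on || [])
    .filter(dep => !taskIds.has(dep) || dep === t.id)
    .map(dep => `${t.id} → ${dep}`)
)
if (badRefs.length > 0) {
  // Broken graph - better to regenerate plan.json than execute a bad plan
  console.warn(`plan.json has invalid task dependencies: ${badRefs.join(', ')}`)
}
```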

**Step 4: Hand off to Execution**:

```javascript
// Hand off to lite-execute with the in-memory context
SlashCommand("/workflow:lite-execute --in-memory")
```

## Output File Structure

```
.workflow/.multi-cli-plan/{MCP-task-slug-YYYY-MM-DD}/
├── session-state.json          # Session tracking (orchestrator)
├── rounds/
│   ├── 1/synthesis.json        # Round 1 analysis (cli-discuss-agent)
│   ├── 2/synthesis.json        # Round 2 analysis (cli-discuss-agent)
│   └── .../
├── context-package.json        # Extracted context for planning (orchestrator)
└── plan.json                   # Structured plan (cli-lite-planning-agent)
```

**File Producers**:

| File | Producer | Content |
|------|----------|---------|
| `session-state.json` | Orchestrator | Session metadata, rounds, decisions |
| `rounds/*/synthesis.json` | cli-discuss-agent | Solutions, convergence, cross-verification |
| `context-package.json` | Orchestrator | Extracted solution, dependencies, consensus for planning |
| `plan.json` | cli-lite-planning-agent | Structured tasks for lite-execute |

## synthesis.json Schema

```json
{
  "round": 1,
  "solutions": [{
    "name": "Solution Name",
    "source_cli": ["gemini", "codex"],
    "feasibility": 0.85,
    "effort": "low|medium|high",
    "risk": "low|medium|high",
    "summary": "Brief analysis summary",
    "implementation_plan": {
      "approach": "High-level technical approach",
      "tasks": [
        {"id": "T1", "name": "Task", "depends_on": [], "files": [], "key_point": "..."}
      ],
      "execution_flow": "T1 → T2 → T3",
      "milestones": ["Checkpoint 1", "Checkpoint 2"]
    },
    "dependencies": {"internal": [], "external": []},
    "technical_concerns": ["Risk 1", "Blocker 2"]
  }],
  "convergence": {
    "score": 0.85,
    "new_insights": false,
    "recommendation": "converged|continue|user_input_needed"
  },
  "cross_verification": {
    "agreements": [],
    "disagreements": [],
    "resolution": "..."
  },
  "clarification_questions": []
}
```

**Key Planning Fields**:

| Field | Purpose |
|-------|---------|
| `feasibility` | Viability score (0-1) |
| `implementation_plan.tasks[]` | Discrete tasks with dependencies |
| `implementation_plan.execution_flow` | Task sequence visualization |
| `implementation_plan.milestones` | Key checkpoints |
| `technical_concerns` | Risks and blockers |

**Note**: Solutions ranked by internal scoring (array order = priority)
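
Since array order encodes priority, consumers can take `solutions[0]` as the recommended option once a round has converged. A minimal sketch against the schema above (`Read` is the same tool used elsewhere in this workflow):

```javascript
// Read the latest round and pick the highest-priority solution (array order = priority)
const synthesis = JSON.parse(Read(`${sessionFolder}/rounds/${currentRound}/synthesis.json`))
const [topSolution] = synthesis.solutions

if (synthesis.convergence.recommendation === 'converged') {
  console.log(`Converged on "${topSolution.name}" (feasibility ${topSolution.feasibility})`)
} else if (synthesis.convergence.recommendation === 'user_input_needed') {
  // Surface the agent's open questions before running another round
  console.log(synthesis.clarification_questions.join('\n'))
}
```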

## TodoWrite Structure

**Initialization**:
```javascript
TodoWrite({ todos: [
  { content: "Phase 1: Context Gathering", status: "in_progress", activeForm: "Gathering context" },
  { content: "Phase 2: Multi-CLI Discussion", status: "pending", activeForm: "Running discussion" },
  { content: "Phase 3: Present Options", status: "pending", activeForm: "Presenting options" },
  { content: "Phase 4: User Decision", status: "pending", activeForm: "Awaiting decision" },
  { content: "Phase 5: Plan Generation", status: "pending", activeForm: "Generating plan" }
]})
```

**During Discussion Rounds**:
```javascript
TodoWrite({ todos: [
  { content: "Phase 1: Context Gathering", status: "completed", activeForm: "Gathering context" },
  { content: "Phase 2: Multi-CLI Discussion", status: "in_progress", activeForm: "Running discussion" },
  { content: " → Round 1: Initial analysis", status: "completed", activeForm: "Analyzing" },
  { content: " → Round 2: Deep verification", status: "in_progress", activeForm: "Verifying" },
  { content: "Phase 3: Present Options", status: "pending", activeForm: "Presenting options" },
  // ...
]})
```

## Error Handling

| Error | Resolution |
|-------|------------|
| ACE search fails | Fall back to Glob/Grep for file discovery |
| Agent fails | Retry once, then present partial results |
| CLI timeout (in agent) | Agent uses fallback: gemini → codex → claude |
| No convergence | Present best options, flag uncertainty |
| synthesis.json parse error | Request agent retry |
| User cancels | Save session for later resumption |
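
As an illustration of the first row, the ACE fallback could look like the sketch below; `aceSearch` is a hypothetical wrapper around the ACE tool, and the `Glob`/`Grep` calls are shown with illustrative signatures:

```javascript
// Try ACE semantic search first; fall back to plain file discovery on failure
let candidateFiles
try {
  candidateFiles = aceSearch(taskDescription)          // hypothetical ACE wrapper
} catch (err) {
  console.warn(`ACE search failed (${err.message}); falling back to Glob/Grep`)
  candidateFiles = Glob('src/**/*')                    // coarse structural discovery
    .filter(path => Grep(path, taskKeywords))          // illustrative keyword narrowing
}
```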

## Configuration

| Flag | Default | Description |
|------|---------|-------------|
| `--max-rounds` | 3 | Maximum discussion rounds |
| `--tools` | gemini,codex | CLI tools for analysis |
| `--mode` | parallel | Execution mode: parallel or serial |
| `--auto-execute` | false | Auto-execute after approval |
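
Flags compose as expected. Assuming the command is exposed as `/workflow:multi-cli-plan` (the session folder naming above suggests this), a constrained run might look like:

```bash
# Two discussion rounds, serial CLI execution, auto-execute once the plan is approved
/workflow:multi-cli-plan --max-rounds 2 --tools gemini,codex --mode serial --auto-execute \
  "Refactor payment processing for multi-gateway support"
```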

## Best Practices

1. **Be Specific**: Detailed task descriptions improve ACE context quality
2. **Provide Feedback**: Use clarification rounds to refine requirements
3. **Trust Cross-Verification**: Multi-CLI consensus indicates high confidence
4. **Review Trade-offs**: Consider pros/cons before selecting solution
5. **Check synthesis.json**: Review agent output for detailed analysis
6. **Iterate When Needed**: Don't hesitate to request more analysis

## Related Commands

```bash
# Simpler single-round planning
/workflow:lite-plan "task description"

# Issue-driven discovery
/issue:discover-by-prompt "find issues"

# View session files
cat .workflow/.multi-cli-plan/{session-id}/plan.json
cat .workflow/.multi-cli-plan/{session-id}/rounds/1/synthesis.json
cat .workflow/.multi-cli-plan/{session-id}/context-package.json

# Direct execution (if you have plan.json)
/workflow:lite-execute plan.json
```
362
.claude/commands/workflow/plan-verify.md
Normal file
@@ -0,0 +1,362 @@

---
name: plan-verify
description: Perform READ-ONLY verification analysis between IMPL_PLAN.md, task JSONs, and brainstorming artifacts. Generates structured report with quality gate recommendation. Does NOT modify any files.
argument-hint: "[optional: --session session-id]"
allowed-tools: Read(*), Write(*), Glob(*), Bash(*)
---

## User Input

```text
$ARGUMENTS
```

You **MUST** consider the user input before proceeding (if not empty).

## Goal

Generate a comprehensive verification report that identifies inconsistencies, duplications, ambiguities, and underspecified items between action planning artifacts (`IMPL_PLAN.md`, `task.json`) and brainstorming artifacts (`role analysis documents`). This command MUST run only after `/workflow:plan` has successfully produced complete `IMPL_PLAN.md` and task JSON files.

**Output**: A structured Markdown report saved to `.workflow/active/WFS-{session}/.process/PLAN_VERIFICATION.md` containing:
- Executive summary with quality gate recommendation
- Detailed findings by severity (CRITICAL/HIGH/MEDIUM/LOW)
- Requirements coverage analysis
- Dependency integrity check
- Synthesis alignment validation
- Actionable remediation recommendations

## Operating Constraints

**STRICTLY READ-ONLY FOR SOURCE ARTIFACTS**:
- **MUST NOT** modify `IMPL_PLAN.md`, any `task.json` files, or brainstorming artifacts
- **MUST NOT** create or delete task files
- **MUST ONLY** write the verification report to `.process/PLAN_VERIFICATION.md`

**Synthesis Authority**: The `role analysis documents` are **authoritative** for requirements and design decisions. Any conflicts between IMPL_PLAN/tasks and synthesis are automatically CRITICAL and require adjustment of the plan/tasks—not reinterpretation of requirements.

**Quality Gate Authority**: The verification report provides a binding recommendation (BLOCK_EXECUTION / PROCEED_WITH_FIXES / PROCEED_WITH_CAUTION / PROCEED) based on objective severity criteria. User MUST review critical/high issues before proceeding with implementation.
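
The exact thresholds live in the verification schema, but the mapping from severity counts to the gate value presumably follows the obvious precedence. A plausible sketch:

```javascript
// Illustrative quality-gate mapping (authoritative criteria are in verify-json-schema.json)
function qualityGate({ critical_count, high_count, medium_count }) {
  if (critical_count > 0) return 'BLOCK_EXECUTION'
  if (high_count > 0) return 'PROCEED_WITH_FIXES'
  if (medium_count > 0) return 'PROCEED_WITH_CAUTION'
  return 'PROCEED'
}
```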

## Execution Steps

### 1. Initialize Analysis Context

```bash
# Detect active workflow session
IF --session parameter provided:
    session_id = provided session
ELSE:
    # Auto-detect active session
    active_sessions = bash(find .workflow/active/ -name "WFS-*" -type d 2>/dev/null)
    IF active_sessions is empty:
        ERROR: "No active workflow session found. Use --session <session-id>"
        EXIT
    ELSE IF active_sessions has multiple entries:
        # Use most recently modified session
        session_id = bash(ls -td .workflow/active/WFS-*/ 2>/dev/null | head -1 | xargs basename)
    ELSE:
        session_id = basename(active_sessions[0])

# Derive absolute paths
session_dir = .workflow/active/WFS-{session}
brainstorm_dir = session_dir/.brainstorming
task_dir = session_dir/.task
process_dir = session_dir/.process
session_file = session_dir/workflow-session.json

# Create .process directory if not exists (report output location)
IF NOT EXISTS(process_dir):
    bash(mkdir -p "{process_dir}")

# Validate required artifacts
# Note: "role analysis documents" refers to [role]/analysis.md files (e.g., product-manager/analysis.md)
SYNTHESIS_DIR = brainstorm_dir  # Contains role analysis files: */analysis.md
IMPL_PLAN = session_dir/IMPL_PLAN.md
TASK_FILES = Glob(task_dir/*.json)

# Abort if missing - in order of dependency
SESSION_FILE_EXISTS = EXISTS(session_file)
IF NOT SESSION_FILE_EXISTS:
    WARNING: "workflow-session.json not found. User intent alignment verification will be skipped."
    # Continue execution - this is optional context, not blocking

SYNTHESIS_FILES = Glob(brainstorm_dir/*/analysis.md)
IF SYNTHESIS_FILES.count == 0:
    ERROR: "No role analysis documents found in .brainstorming/*/analysis.md. Run /workflow:brainstorm:synthesis first"
    EXIT

IF NOT EXISTS(IMPL_PLAN):
    ERROR: "IMPL_PLAN.md not found. Run /workflow:plan first"
    EXIT

IF TASK_FILES.count == 0:
    ERROR: "No task JSON files found. Run /workflow:plan first"
    EXIT
```
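
The block above is pseudocode; the auto-detection branch translates to plain bash roughly as follows (`$SESSION_ARG` stands in for the parsed `--session` value):

```bash
# Sketch of the session-detection branch in plain bash
if [ -n "$SESSION_ARG" ]; then
  session_id="$SESSION_ARG"
else
  # Most recently modified active session, if any
  session_id=$(ls -td .workflow/active/WFS-*/ 2>/dev/null | head -1 | xargs -r basename)
  if [ -z "$session_id" ]; then
    echo "No active workflow session found. Use --session <session-id>" >&2
    exit 1
  fi
fi
session_dir=".workflow/active/${session_id}"
mkdir -p "${session_dir}/.process"   # report output location
```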

### 2. Load Artifacts (Progressive Disclosure)

Load only minimal necessary context from each artifact:

**From workflow-session.json** (OPTIONAL - Primary Reference for User Intent):
- **ONLY IF EXISTS**: Load user intent context
  - Original user prompt/intent (project or description field)
  - User's stated goals and objectives
  - User's scope definition
- **IF MISSING**: Set user_intent_analysis = "SKIPPED: workflow-session.json not found"

**From role analysis documents** (AUTHORITATIVE SOURCE):
- Functional Requirements (IDs, descriptions, acceptance criteria)
- Non-Functional Requirements (IDs, targets)
- Business Requirements (IDs, success metrics)
- Key Architecture Decisions
- Risk factors and mitigation strategies
- Implementation Roadmap (high-level phases)

**From IMPL_PLAN.md**:
- Summary and objectives
- Context Analysis
- Implementation Strategy
- Task Breakdown Summary
- Success Criteria
- Brainstorming Artifacts References (if present)

**From task.json files**:
- Task IDs
- Titles and descriptions
- Status
- Dependencies (depends_on, blocks)
- Context (requirements, focus_paths, acceptance, artifacts)
- Flow control (pre_analysis, implementation_approach)
- Meta (complexity, priority)

### 3. Build Semantic Models

Create internal representations (do not include raw artifacts in output):

**Requirements inventory**:
- Each functional/non-functional/business requirement with stable ID
- Requirement text, acceptance criteria, priority

**Architecture decisions inventory**:
- ADRs from synthesis
- Technology choices
- Data model references

**Task coverage mapping** (see the sketch after this section):
- Map each task to one or more requirements (by ID reference or keyword inference)
- Map each requirement to covering tasks

**Dependency graph**:
- Task-to-task dependencies (depends_on, blocks)
- Requirement-level dependencies (from synthesis)
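
A minimal in-memory shape for the coverage mapping sketched above, assuming requirements carry stable IDs (e.g., `FR-1`) and task JSONs reference them under `context.requirements`:

```javascript
// Map each requirement ID to the tasks that cover it; uncovered IDs become findings
const coverage = new Map(requirements.map(r => [r.id, []]))
for (const task of tasks) {
  for (const reqId of task.context?.requirements || []) {
    coverage.get(reqId)?.push(task.id)   // unknown IDs are themselves a consistency finding
  }
}
const uncovered = [...coverage]
  .filter(([, taskIds]) => taskIds.length === 0)
  .map(([id]) => id)
```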

### 4. Detection Passes (Agent-Driven Multi-Dimensional Analysis)

**Execution Strategy**:
- Single `cli-explore-agent` invocation
- Agent executes multiple CLI analyses internally (different dimensions: A-H)
- Token Budget: 50 findings maximum (aggregate remainder in overflow summary)
- Priority Allocation: CRITICAL (unlimited) → HIGH (15) → MEDIUM (20) → LOW (15)
- Early Exit: If CRITICAL findings > 0 in User Intent/Requirements Coverage, skip LOW/MEDIUM checks

**Execution Order** (Agent orchestrates internally):

1. **Tier 1 (CRITICAL Path)**: A, B, C - User intent, coverage, consistency (full analysis)
2. **Tier 2 (HIGH Priority)**: D, E - Dependencies, synthesis alignment (limit 15 findings)
3. **Tier 3 (MEDIUM Priority)**: F - Specification quality (limit 20 findings)
4. **Tier 4 (LOW Priority)**: G, H - Duplication, feasibility (limit 15 findings)

---

#### Phase 4.1: Launch Unified Verification Agent

```javascript
Task({
  subagent_type: "cli-explore-agent",
  run_in_background: false,
  description: "Multi-dimensional plan verification",
  prompt: `
## Plan Verification Task

### MANDATORY FIRST STEPS
1. Read: ~/.claude/workflows/cli-templates/schemas/plan-verify-agent-schema.json (dimensions & rules)
2. Read: ~/.claude/workflows/cli-templates/schemas/verify-json-schema.json (output schema)
3. Read: ${session_file} (user intent)
4. Read: ${IMPL_PLAN} (implementation plan)
5. Glob: ${task_dir}/*.json (task files)
6. Glob: ${SYNTHESIS_DIR}/*/analysis.md (role analyses)

### Execution Flow

**Load schema → Execute tiered CLI analysis → Aggregate findings → Write JSON**

FOR each tier in [1, 2, 3, 4]:
- Load tier config from plan-verify-agent-schema.json
- Execute: ccw cli -p "PURPOSE: Verify dimensions {tier.dimensions}
  TASK: {tier.checks from schema}
  CONTEXT: @${session_dir}/**/*
  EXPECTED: Findings JSON with dimension, severity, location, summary, recommendation
  CONSTRAINTS: Limit {tier.limit} findings
  " --tool gemini --mode analysis --rule {tier.rule}
- Parse findings, check early exit condition
- IF tier == 1 AND critical_count > 0: skip tier 3-4

### Output
Write: ${process_dir}/verification-findings.json (follow verify-json-schema.json)
Return: Quality gate decision + 2-3 sentence summary
`
})
```

---

#### Phase 4.2: Load and Organize Findings

```javascript
// Load findings (single parse for all subsequent use)
const data = JSON.parse(Read(`${process_dir}/verification-findings.json`))
const { session_id, timestamp, verification_tiers_completed, findings, summary } = data
const { critical_count, high_count, medium_count, low_count, total_findings, coverage_percentage, recommendation } = summary

// Group by severity and dimension
const bySeverity = Object.groupBy(findings, f => f.severity)
const byDimension = Object.groupBy(findings, f => f.dimension)

// Dimension metadata (from schema)
const DIMS = {
  A: "User Intent Alignment", B: "Requirements Coverage", C: "Consistency Validation",
  D: "Dependency Integrity", E: "Synthesis Alignment", F: "Task Specification Quality",
  G: "Duplication Detection", H: "Feasibility Assessment"
}
```
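
Note that `Object.groupBy` is an ES2024 addition (Node 21+); on older runtimes an equivalent reduce-based helper can stand in:

```javascript
// Drop-in fallback for runtimes without Object.groupBy
const groupBy = (items, keyFn) =>
  items.reduce((acc, item) => {
    const key = keyFn(item)
    ;(acc[key] ||= []).push(item)
    return acc
  }, {})

const bySeverity = groupBy(findings, f => f.severity)
const byDimension = groupBy(findings, f => f.dimension)
```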

### 5. Generate Report

```javascript
// Helper: render dimension section
const renderDimension = (dim) => {
  const items = byDimension[dim] || []
  return items.length > 0
    ? items.map(f => `### ${f.id}: ${f.summary}\n- **Severity**: ${f.severity}\n- **Location**: ${f.location.join(', ')}\n- **Recommendation**: ${f.recommendation}`).join('\n\n')
    : `> ✅ No ${DIMS[dim]} issues detected.`
}

// Helper: render severity section
const renderSeverity = (severity, impact) => {
  const items = bySeverity[severity] || []
  return items.length > 0
    ? items.map(f => `#### ${f.id}: ${f.summary}\n- **Dimension**: ${f.dimension_name}\n- **Location**: ${f.location.join(', ')}\n- **Impact**: ${impact}\n- **Recommendation**: ${f.recommendation}`).join('\n\n')
    : `> ✅ No ${severity.toLowerCase()}-severity issues detected.`
}

// Build Markdown report
const fullReport = `
# Plan Verification Report

**Session**: WFS-${session_id} | **Generated**: ${timestamp}
**Tiers Completed**: ${verification_tiers_completed.join(', ')}

---

## Executive Summary

| Metric | Value | Status |
|--------|-------|--------|
| Risk Level | ${critical_count > 0 ? 'CRITICAL' : high_count > 0 ? 'HIGH' : medium_count > 0 ? 'MEDIUM' : 'LOW'} | ${critical_count > 0 ? '🔴' : high_count > 0 ? '🟠' : medium_count > 0 ? '🟡' : '🟢'} |
| Critical/High/Medium/Low | ${critical_count}/${high_count}/${medium_count}/${low_count} | |
| Coverage | ${coverage_percentage}% | ${coverage_percentage >= 90 ? '🟢' : coverage_percentage >= 75 ? '🟡' : '🔴'} |

**Recommendation**: **${recommendation}**

---

## Findings Summary

| ID | Dimension | Severity | Location | Summary |
|----|-----------|----------|----------|---------|
${findings.map(f => `| ${f.id} | ${f.dimension_name} | ${f.severity} | ${f.location.join(', ')} | ${f.summary} |`).join('\n')}

---

## Analysis by Dimension

${['A','B','C','D','E','F','G','H'].map(d => `### ${d}. ${DIMS[d]}\n\n${renderDimension(d)}`).join('\n\n---\n\n')}

---

## Findings by Severity

### CRITICAL (${critical_count})
${renderSeverity('CRITICAL', 'Blocks execution')}

### HIGH (${high_count})
${renderSeverity('HIGH', 'Fix before execution recommended')}

### MEDIUM (${medium_count})
${renderSeverity('MEDIUM', 'Address during/after implementation')}

### LOW (${low_count})
${renderSeverity('LOW', 'Optional improvement')}

---

## Next Steps

${recommendation === 'BLOCK_EXECUTION' ? '🛑 **BLOCK**: Fix critical issues → Re-verify' :
  recommendation === 'PROCEED_WITH_FIXES' ? '⚠️ **FIX RECOMMENDED**: Address high issues → Re-verify or Execute' :
  '✅ **READY**: Proceed to /workflow:execute'}

Re-verify: \`/workflow:plan-verify --session ${session_id}\`
Execute: \`/workflow:execute --resume-session="${session_id}"\`
`

// Write report
Write(`${process_dir}/PLAN_VERIFICATION.md`, fullReport)
console.log(`✅ Report: ${process_dir}/PLAN_VERIFICATION.md\n📊 ${recommendation} | C:${critical_count} H:${high_count} M:${medium_count} L:${low_count} | Coverage:${coverage_percentage}%`)
```

### 6. Next Step Selection

```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
const canExecute = recommendation !== 'BLOCK_EXECUTION'

// Auto mode
if (autoYes) {
  if (canExecute) {
    SlashCommand(`/workflow:execute --yes --resume-session="${session_id}"`)
  } else {
    console.log(`[--yes] BLOCK_EXECUTION - Fix ${critical_count} critical issues first.`)
  }
  return
}

// Interactive mode - build options based on quality gate
const options = canExecute
  ? [
      { label: recommendation === 'PROCEED_WITH_FIXES' ? "Execute Anyway" : "Execute (Recommended)",
        description: "Proceed to /workflow:execute" },
      { label: "Review Report", description: "Review findings before deciding" },
      { label: "Re-verify", description: "Re-run after manual fixes" }
    ]
  : [
      { label: "Review Report", description: "Review critical issues" },
      { label: "Re-verify", description: "Re-run after fixing issues" }
    ]

const selection = AskUserQuestion({
  questions: [{
    question: `Quality gate: ${recommendation}. Next step?`,
    header: "Action",
    multiSelect: false,
    options
  }]
})

// Handle selection (answers are keyed by question header)
const choice = selection.answers["Action"]
if (choice.includes("Execute")) {
  SlashCommand(`/workflow:execute --resume-session="${session_id}"`)
} else if (choice === "Re-verify") {
  SlashCommand(`/workflow:plan-verify --session ${session_id}`)
}
```

@@ -1,10 +1,15 @@
---
name: plan
description: 5-phase planning workflow with action-planning-agent task generation, outputs IMPL_PLAN.md and task JSONs with optional CLI auto-execution
argument-hint: "[--cli-execute] \"text description\"|file.md"
description: 5-phase planning workflow with action-planning-agent task generation, outputs IMPL_PLAN.md and task JSONs
argument-hint: "[-y|--yes] \"text description\"|file.md"
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*)
group: workflow
---

## Auto Mode

When `--yes` or `-y`: Auto-continue all phases (skip confirmations), use recommended conflict resolutions.

# Workflow Plan Command (/workflow:plan)

## Coordinator Role
@@ -23,8 +28,8 @@ This workflow runs **fully autonomously** once triggered. Phase 3 (conflict reso
5. **Phase 4 executes** → Task generation (task-generate-agent) → Reports final summary

**Task Attachment Model**:
- SlashCommand invocation **expands workflow** by attaching sub-tasks to current TodoWrite
- When a sub-command is invoked (e.g., `/workflow:tools:context-gather`), its internal tasks are attached to the orchestrator's TodoWrite
- SlashCommand execute **expands workflow** by attaching sub-tasks to current TodoWrite
- When a sub-command is executed (e.g., `/workflow:tools:context-gather`), its internal tasks are attached to the orchestrator's TodoWrite
- Orchestrator **executes these attached tasks** sequentially
- After completion, attached tasks are **collapsed** back to high-level phase summary
- This is **task expansion**, not external delegation
@@ -43,13 +48,48 @@ This workflow runs **fully autonomously** once triggered. Phase 3 (conflict reso
3. **Parse Every Output**: Extract required data from each command/agent output for next phase
4. **Auto-Continue via TodoList**: Check TodoList status to execute next pending phase automatically
5. **Track Progress**: Update TodoWrite dynamically with task attachment/collapse pattern
6. **Task Attachment Model**: SlashCommand invocation **attaches** sub-tasks to current workflow. Orchestrator **executes** these attached tasks itself, then **collapses** them after completion
6. **Task Attachment Model**: SlashCommand execute **attaches** sub-tasks to current workflow. Orchestrator **executes** these attached tasks itself, then **collapses** them after completion
7. **⚠️ CRITICAL: DO NOT STOP**: Continuous multi-phase workflow. After executing all attached tasks, immediately collapse them and execute next phase

## Execution Process

```
Input Parsing:
└─ Convert user input to structured format (GOAL/SCOPE/CONTEXT)

Phase 1: Session Discovery
└─ /workflow:session:start --auto "structured-description"
   └─ Output: sessionId (WFS-xxx)

Phase 2: Context Gathering
└─ /workflow:tools:context-gather --session sessionId "structured-description"
   ├─ Tasks attached: Analyze structure → Identify integration → Generate package
   └─ Output: contextPath + conflict_risk

Phase 3: Conflict Resolution
└─ Decision (conflict_risk check):
   ├─ conflict_risk ≥ medium → Execute /workflow:tools:conflict-resolution
   │  ├─ Tasks attached: Detect conflicts → Present to user → Apply strategies
   │  └─ Output: Modified brainstorm artifacts
   └─ conflict_risk < medium → Skip to Phase 4

Phase 4: Task Generation
└─ /workflow:tools:task-generate-agent --session sessionId
   └─ Output: IMPL_PLAN.md, task JSONs, TODO_LIST.md

Return:
└─ Summary with recommended next steps
```

## 5-Phase Execution

### Phase 1: Session Discovery
**Command**: `SlashCommand(command="/workflow:session:start --auto \"[structured-task-description]\"")`

**Step 1.1: Execute** - Create or discover workflow session

```javascript
SlashCommand(command="/workflow:session:start --auto \"[structured-task-description]\"")
```

**Task Description Structure**:
```
@@ -70,16 +110,54 @@ CONTEXT: Existing user database schema, REST API endpoints

**Validation**:
- Session ID successfully extracted
- Session directory `.workflow/[sessionId]/` exists
- Session directory `.workflow/active/[sessionId]/` exists

**Note**: Session directory contains `workflow-session.json` (metadata). Do NOT look for `manifest.json` here - it only exists in `.workflow/archives/` for archived sessions.

**TodoWrite**: Mark phase 1 completed, phase 2 in_progress

**After Phase 1**: Return to user showing Phase 1 results, then auto-continue to Phase 2
**After Phase 1**: Initialize planning-notes.md with user intent

```javascript
// Create minimal planning notes document
const planningNotesPath = `.workflow/active/${sessionId}/planning-notes.md`
const userGoal = structuredDescription.goal
const userConstraints = structuredDescription.context || "None specified"

Write(planningNotesPath, `# Planning Notes

**Session**: ${sessionId}
**Created**: ${new Date().toISOString()}

## User Intent (Phase 1)

- **GOAL**: ${userGoal}
- **KEY_CONSTRAINTS**: ${userConstraints}

---

## Context Findings (Phase 2)
(To be filled by context-gather)

## Conflict Decisions (Phase 3)
(To be filled if conflicts detected)

## Consolidated Constraints (Phase 4 Input)
1. ${userConstraints}
`)
```

Return to user showing Phase 1 results, then auto-continue to Phase 2

---

### Phase 2: Context Gathering
**Command**: `SlashCommand(command="/workflow:tools:context-gather --session [sessionId] \"[structured-task-description]\"")`

**Step 2.1: Execute** - Gather project context and analyze codebase

```javascript
SlashCommand(command="/workflow:tools:context-gather --session [sessionId] \"[structured-task-description]\"")
```

**Use Same Structured Description**: Pass the same structured format from Phase 1

@@ -87,49 +165,85 @@ CONTEXT: Existing user database schema, REST API endpoints

**Parse Output**:
- Extract: context-package.json path (store as `contextPath`)
- Typical pattern: `.workflow/[sessionId]/.process/context-package.json`
- Typical pattern: `.workflow/active/[sessionId]/.process/context-package.json`

**Validation**:
- Context package path extracted
- File exists and is valid JSON
- `prioritized_context` field exists

<!-- TodoWrite: When context-gather invoked, INSERT 3 context-gather tasks, mark first as in_progress -->
<!-- TodoWrite: When context-gather executed, INSERT 3 context-gather tasks, mark first as in_progress -->

**TodoWrite Update (Phase 2 SlashCommand invoked - tasks attached)**:
**TodoWrite Update (Phase 2 SlashCommand executed - tasks attached)**:
```json
[
  {"content": "Execute session discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Phase 2.1: Analyze codebase structure (context-gather)", "status": "in_progress", "activeForm": "Analyzing codebase structure"},
  {"content": "Phase 2.2: Identify integration points (context-gather)", "status": "pending", "activeForm": "Identifying integration points"},
  {"content": "Phase 2.3: Generate context package (context-gather)", "status": "pending", "activeForm": "Generating context package"},
  {"content": "Execute task generation", "status": "pending", "activeForm": "Executing task generation"}
  {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Phase 2: Context Gathering", "status": "in_progress", "activeForm": "Executing context gathering"},
  {"content": " → Analyze codebase structure", "status": "in_progress", "activeForm": "Analyzing codebase structure"},
  {"content": " → Identify integration points", "status": "pending", "activeForm": "Identifying integration points"},
  {"content": " → Generate context package", "status": "pending", "activeForm": "Generating context package"},
  {"content": "Phase 4: Task Generation", "status": "pending", "activeForm": "Executing task generation"}
]
```

**Note**: SlashCommand invocation **attaches** context-gather's 3 tasks. Orchestrator **executes** these tasks sequentially.
**Note**: SlashCommand execute **attaches** context-gather's 3 tasks. Orchestrator **executes** these tasks sequentially.

<!-- TodoWrite: After Phase 2 tasks complete, REMOVE Phase 2.1-2.3, restore to orchestrator view -->

**TodoWrite Update (Phase 2 completed - tasks collapsed)**:
```json
[
  {"content": "Execute session discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Execute context gathering", "status": "completed", "activeForm": "Executing context gathering"},
  {"content": "Execute task generation", "status": "pending", "activeForm": "Executing task generation"}
  {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"},
  {"content": "Phase 4: Task Generation", "status": "pending", "activeForm": "Executing task generation"}
]
```

**Note**: Phase 2 tasks completed and collapsed to summary.

**After Phase 2**: Return to user showing Phase 2 results, then auto-continue to Phase 3/4 (depending on conflict_risk)
**After Phase 2**: Update planning-notes.md with context findings, then auto-continue

```javascript
// Read context-package to extract key findings
const contextPackage = JSON.parse(Read(contextPath))
const conflictRisk = contextPackage.conflict_detection?.risk_level || 'low'
const criticalFiles = (contextPackage.exploration_results?.aggregated_insights?.critical_files || [])
  .slice(0, 5).map(f => f.path)
const archPatterns = contextPackage.project_context?.architecture_patterns || []
const constraints = contextPackage.exploration_results?.aggregated_insights?.constraints || []

// Append Phase 2 findings to planning-notes.md
Edit(planningNotesPath, {
  old: '## Context Findings (Phase 2)\n(To be filled by context-gather)',
  new: `## Context Findings (Phase 2)

- **CRITICAL_FILES**: ${criticalFiles.join(', ') || 'None identified'}
- **ARCHITECTURE**: ${archPatterns.join(', ') || 'Not detected'}
- **CONFLICT_RISK**: ${conflictRisk}
- **CONSTRAINTS**: ${constraints.length > 0 ? constraints.join('; ') : 'None'}`
})

// Append Phase 2 constraints to consolidated list (numbering starts at 2: item 1 is the user constraint)
Edit(planningNotesPath, {
  old: '## Consolidated Constraints (Phase 4 Input)',
  new: `## Consolidated Constraints (Phase 4 Input)
${constraints.map((c, i) => `${i + 2}. [Context] ${c}`).join('\n')}`
})
```

Return to user showing Phase 2 results, then auto-continue to Phase 3/4 (depending on conflict_risk)

---

### Phase 3: Conflict Resolution (Optional - auto-triggered by conflict risk)
### Phase 3: Conflict Resolution

**Trigger**: Only execute when context-package.json indicates conflict_risk is "medium" or "high"

**Command**: `SlashCommand(command="/workflow:tools:conflict-resolution --session [sessionId] --context [contextPath]")`
**Step 3.1: Execute** - Detect and resolve conflicts with CLI analysis

```javascript
SlashCommand(command="/workflow:tools:conflict-resolution --session [sessionId] --context [contextPath]")
```

**Input**:
- sessionId from Phase 1
@@ -138,10 +252,10 @@ CONTEXT: Existing user database schema, REST API endpoints

**Parse Output**:
- Extract: Execution status (success/skipped/failed)
- Verify: CONFLICT_RESOLUTION.md file path (if executed)
- Verify: conflict-resolution.json file path (if executed)

**Validation**:
- File `.workflow/[sessionId]/.process/CONFLICT_RESOLUTION.md` exists (if executed)
- File `.workflow/active/[sessionId]/.process/conflict-resolution.json` exists (if executed)

**Skip Behavior**:
- If conflict_risk is "none" or "low", skip directly to Phase 3.5
@@ -149,41 +263,85 @@ CONTEXT: Existing user database schema, REST API endpoints

<!-- TodoWrite: If conflict_risk ≥ medium, INSERT 3 conflict-resolution tasks -->

**TodoWrite Update (Phase 3 SlashCommand invoked - tasks attached, if conflict_risk ≥ medium)**:
**TodoWrite Update (Phase 3 SlashCommand executed - tasks attached, if conflict_risk ≥ medium)**:
```json
[
  {"content": "Execute session discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Execute context gathering", "status": "completed", "activeForm": "Executing context gathering"},
  {"content": "Phase 3.1: Detect conflicts with CLI analysis (conflict-resolution)", "status": "in_progress", "activeForm": "Detecting conflicts"},
  {"content": "Phase 3.2: Present conflicts to user (conflict-resolution)", "status": "pending", "activeForm": "Presenting conflicts"},
  {"content": "Phase 3.3: Apply resolution strategies (conflict-resolution)", "status": "pending", "activeForm": "Applying resolution strategies"},
  {"content": "Execute task generation", "status": "pending", "activeForm": "Executing task generation"}
  {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"},
  {"content": "Phase 3: Conflict Resolution", "status": "in_progress", "activeForm": "Resolving conflicts"},
  {"content": " → Detect conflicts with CLI analysis", "status": "in_progress", "activeForm": "Detecting conflicts"},
  {"content": " → Present conflicts to user", "status": "pending", "activeForm": "Presenting conflicts"},
  {"content": " → Apply resolution strategies", "status": "pending", "activeForm": "Applying resolution strategies"},
  {"content": "Phase 4: Task Generation", "status": "pending", "activeForm": "Executing task generation"}
]
```

**Note**: SlashCommand invocation **attaches** conflict-resolution's 3 tasks. Orchestrator **executes** these tasks sequentially.
**Note**: SlashCommand execute **attaches** conflict-resolution's 3 tasks. Orchestrator **executes** these tasks sequentially.

<!-- TodoWrite: After Phase 3 tasks complete, REMOVE Phase 3.1-3.3, restore to orchestrator view -->

**TodoWrite Update (Phase 3 completed - tasks collapsed)**:
```json
[
  {"content": "Execute session discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Execute context gathering", "status": "completed", "activeForm": "Executing context gathering"},
  {"content": "Resolve conflicts and apply fixes", "status": "completed", "activeForm": "Resolving conflicts"},
  {"content": "Execute task generation", "status": "pending", "activeForm": "Executing task generation"}
  {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"},
  {"content": "Phase 3: Conflict Resolution", "status": "completed", "activeForm": "Resolving conflicts"},
  {"content": "Phase 4: Task Generation", "status": "pending", "activeForm": "Executing task generation"}
]
```

**Note**: Phase 3 tasks completed and collapsed to summary.

**After Phase 3**: Return to user showing conflict resolution results (if executed) and selected strategies, then auto-continue to Phase 3.5
**After Phase 3**: Update planning-notes.md with conflict decisions (if executed), then auto-continue

```javascript
// If Phase 3 was executed, update planning-notes.md
// (a string comparison like conflictRisk >= 'medium' would mis-handle 'high', so check membership explicitly)
if (['medium', 'high'].includes(conflictRisk)) {
  const conflictResPath = `.workflow/active/${sessionId}/.process/conflict-resolution.json`

  if (fs.existsSync(conflictResPath)) {
    const conflictRes = JSON.parse(Read(conflictResPath))
    const resolved = conflictRes.resolved_conflicts || []
    const modifiedArtifacts = conflictRes.modified_artifacts || []
    const planningConstraints = conflictRes.planning_constraints || []

    // Update Phase 3 section
    Edit(planningNotesPath, {
      old: '## Conflict Decisions (Phase 3)\n(To be filled if conflicts detected)',
      new: `## Conflict Decisions (Phase 3)

- **RESOLVED**: ${resolved.map(r => `${r.type} → ${r.strategy}`).join('; ') || 'None'}
- **MODIFIED_ARTIFACTS**: ${modifiedArtifacts.join(', ') || 'None'}
- **CONSTRAINTS**: ${planningConstraints.join('; ') || 'None'}`
    })

    // Append Phase 3 constraints to consolidated list
    if (planningConstraints.length > 0) {
      const currentNotes = Read(planningNotesPath)
      const constraintCount = (currentNotes.match(/^\d+\./gm) || []).length

      Edit(planningNotesPath, {
        old: '## Consolidated Constraints (Phase 4 Input)',
        new: `## Consolidated Constraints (Phase 4 Input)
${planningConstraints.map((c, i) => `${constraintCount + i + 1}. [Conflict] ${c}`).join('\n')}`
      })
    }
  }
}
```

Return to user showing conflict resolution results (if executed) and selected strategies, then auto-continue to Phase 3.5

**Memory State Check**:
- Evaluate current context window usage and memory state
- If memory usage is high (>110K tokens or approaching context limits):
  - **Command**: `SlashCommand(command="/compact")`
  - This optimizes memory before proceeding to Phase 3.5
- If memory usage is high (>120K tokens or approaching context limits):

**Step 3.2: Execute** - Optimize memory before proceeding

```javascript
SlashCommand(command="/compact")
```

- Memory compaction is particularly important after analysis phase which may generate extensive documentation
- Ensures optimal performance and prevents context overflow

@@ -217,68 +375,100 @@ CONTEXT: Existing user database schema, REST API endpoints
- Task generation translates high-level role analyses into concrete, actionable work items
- **Intent priority**: Current user prompt > role analysis.md files > guidance-specification.md

**Command**:
```bash
# Default (agent mode)
SlashCommand(command="/workflow:tools:task-generate-agent --session [sessionId]")
**Step 4.1: Execute** - Generate implementation plan and task JSONs

# With CLI execution
SlashCommand(command="/workflow:tools:task-generate-agent --session [sessionId] --cli-execute")
```javascript
SlashCommand(command="/workflow:tools:task-generate-agent --session [sessionId]")
```

**Flag**:
- `--cli-execute`: Generate tasks with Codex execution commands
**CLI Execution Note**: CLI tool usage is now determined semantically by action-planning-agent based on user's task description. If user specifies "use Codex/Gemini/Qwen for X", the agent embeds `command` fields in relevant `implementation_approach` steps.

**Input**: `sessionId` from Phase 1
**Input**:
- `sessionId` from Phase 1
- **planning-notes.md**: Consolidated constraints from all phases (Phase 1-3)
  - Path: `.workflow/active/[sessionId]/planning-notes.md`
  - Contains: User intent, context findings, conflict decisions, consolidated constraints
  - **Purpose**: Provides structured, minimal context summary to action-planning-agent

**Validation**:
- `.workflow/[sessionId]/IMPL_PLAN.md` exists
- `.workflow/[sessionId]/.task/IMPL-*.json` exists (at least one)
- `.workflow/[sessionId]/TODO_LIST.md` exists
- `.workflow/active/[sessionId]/IMPL_PLAN.md` exists
- `.workflow/active/[sessionId]/.task/IMPL-*.json` exists (at least one)
- `.workflow/active/[sessionId]/TODO_LIST.md` exists

<!-- TodoWrite: When task-generate-agent invoked, INSERT 3 task-generate-agent tasks -->
<!-- TodoWrite: When task-generate-agent executed, ATTACH 1 agent task -->

**TodoWrite Update (Phase 4 SlashCommand invoked - tasks attached)**:
**TodoWrite Update (Phase 4 SlashCommand executed - agent task attached)**:
```json
[
  {"content": "Execute session discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Execute context gathering", "status": "completed", "activeForm": "Executing context gathering"},
  {"content": "Phase 4.1: Discovery - analyze requirements (task-generate-agent)", "status": "in_progress", "activeForm": "Analyzing requirements"},
  {"content": "Phase 4.2: Planning - design tasks (task-generate-agent)", "status": "pending", "activeForm": "Designing tasks"},
  {"content": "Phase 4.3: Output - generate JSONs (task-generate-agent)", "status": "pending", "activeForm": "Generating task JSONs"}
  {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"},
  {"content": "Phase 4: Task Generation", "status": "in_progress", "activeForm": "Executing task generation"}
]
```

**Note**: SlashCommand invocation **attaches** task-generate-agent's 3 tasks. Orchestrator **executes** these tasks.
**Note**: Single agent task attached. Agent autonomously completes discovery, planning, and output generation internally.

**Next Action**: Tasks attached → **Execute Phase 4.1-4.3** sequentially
<!-- TodoWrite: After agent completes, mark task as completed -->

<!-- TodoWrite: After Phase 4 tasks complete, REMOVE Phase 4.1-4.3, restore to orchestrator view -->

**TodoWrite Update (Phase 4 completed - tasks collapsed)**:
**TodoWrite Update (Phase 4 completed)**:
```json
[
  {"content": "Execute session discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Execute context gathering", "status": "completed", "activeForm": "Executing context gathering"},
  {"content": "Execute task generation", "status": "completed", "activeForm": "Executing task generation"}
  {"content": "Phase 1: Session Discovery", "status": "completed", "activeForm": "Executing session discovery"},
  {"content": "Phase 2: Context Gathering", "status": "completed", "activeForm": "Executing context gathering"},
  {"content": "Phase 4: Task Generation", "status": "completed", "activeForm": "Executing task generation"}
]
```

**Note**: Phase 4 tasks completed and collapsed to summary.
**Note**: Agent task completed. No collapse needed (single task).

**Return to User**:
**Step 4.2: User Decision** - Choose next action

After Phase 4 completes, present user with action choices:

```javascript
console.log(`
✅ Planning complete for session: ${sessionId}
📊 Tasks generated: ${taskCount}
📋 Plan: .workflow/active/${sessionId}/IMPL_PLAN.md
`);

// Ask user for next action
const userChoice = AskUserQuestion({
  questions: [{
    question: "Planning complete. What would you like to do next?",
    header: "Next Action",
    multiSelect: false,
    options: [
      {
        label: "Verify Plan Quality (Recommended)",
        description: "Run quality verification to catch issues before execution. Checks plan structure, task dependencies, and completeness."
      },
      {
        label: "Start Execution",
        description: "Begin implementing tasks immediately. Use this if you've already reviewed the plan or want to start quickly."
      },
      {
        label: "Review Status Only",
        description: "View task breakdown and session status without taking further action. You can decide what to do next manually."
      }
    ]
  }]
});

// Execute based on user choice
if (userChoice.answers["Next Action"] === "Verify Plan Quality (Recommended)") {
  console.log("\n🔍 Starting plan verification...\n");
  SlashCommand(command="/workflow:plan-verify --session " + sessionId);
} else if (userChoice.answers["Next Action"] === "Start Execution") {
  console.log("\n🚀 Starting task execution...\n");
  SlashCommand(command="/workflow:execute --session " + sessionId);
} else if (userChoice.answers["Next Action"] === "Review Status Only") {
  console.log("\n📊 Displaying session status...\n");
  SlashCommand(command="/workflow:status --session " + sessionId);
}
```
Planning complete for session: [sessionId]
Tasks generated: [count]
Plan: .workflow/[sessionId]/IMPL_PLAN.md

Recommended Next Steps:
1. /workflow:action-plan-verify --session [sessionId]  # Verify plan quality before execution
2. /workflow:status                                    # Review task breakdown
3. /workflow:execute                                   # Start implementation (after verification)

Quality Gate: Consider running /workflow:action-plan-verify to catch issues early
```
**Return to User**: Based on user's choice, execute the corresponding workflow command.

## TodoWrite Pattern

@@ -286,33 +476,32 @@ Quality Gate: Consider running /workflow:action-plan-verify to catch issues earl

### Key Principles

1. **Task Attachment** (when SlashCommand invoked):
1. **Task Attachment** (when SlashCommand executed):
   - Sub-command's internal tasks are **attached** to orchestrator's TodoWrite
   - Example: `/workflow:tools:context-gather` attaches 3 sub-tasks (Phase 2.1, 2.2, 2.3)
   - **Phase 2, 3**: Multiple sub-tasks attached (e.g., Phase 2.1, 2.2, 2.3)
   - **Phase 4**: Single agent task attached (e.g., "Execute task-generate-agent")
   - First attached task marked as `in_progress`, others as `pending`
   - Orchestrator **executes** these attached tasks sequentially

2. **Task Collapse** (after sub-tasks complete):
   - Remove detailed sub-tasks from TodoWrite
   - **Applies to Phase 2, 3**: Remove detailed sub-tasks from TodoWrite
   - **Collapse** to high-level phase summary
   - Example: Phase 2.1-2.3 collapse to "Execute context gathering: completed"
   - **Phase 4**: No collapse needed (single task, just mark completed)
   - Maintains clean orchestrator-level view

3. **Continuous Execution**:
   - After collapse, automatically proceed to next pending phase
   - After completion, automatically proceed to next pending phase
   - No user intervention required between phases
   - TodoWrite dynamically reflects current execution state

**Lifecycle Summary**: Initial pending tasks → Phase invoked (tasks ATTACHED) → Sub-tasks executed sequentially → Phase completed (tasks COLLAPSED to summary) → Next phase begins → Repeat until all phases complete.
**Lifecycle Summary**: Initial pending tasks → Phase executed (tasks ATTACHED) → Sub-tasks executed sequentially → Phase completed (tasks COLLAPSED to summary for Phase 2/3, or marked completed for Phase 4) → Next phase begins → Repeat until all phases complete.

### Benefits

- ✓ Real-time visibility into sub-task execution
- ✓ Clear mental model: SlashCommand = attach → execute → collapse
- ✓ Clean summary after completion
- ✓ Easy to track workflow progress

**Note**: See individual Phase descriptions (Phase 2, 3, 4) for detailed TodoWrite Update examples with full JSON structures.
**Note**: See individual Phase descriptions for detailed TodoWrite Update examples:
- **Phase 2, 3**: Multiple sub-tasks with attach/collapse pattern
- **Phase 4**: Single agent task (no collapse needed)
|
||||
|
||||
## Input Processing
|
||||
|
||||
@@ -356,26 +545,21 @@ User Input (task description)
|
||||
↓
|
||||
Phase 1: session:start --auto "structured-description"
|
||||
↓ Output: sessionId
|
||||
↓ Session Memory: Previous tasks, context, artifacts
|
||||
↓ Write: planning-notes.md (User Intent section)
|
||||
↓
|
||||
Phase 2: context-gather --session sessionId "structured-description"
|
||||
↓ Input: sessionId + session memory + structured description
|
||||
↓ Output: contextPath (context-package.json) + conflict_risk
|
||||
↓ Input: sessionId + structured description
|
||||
↓ Output: contextPath (context-package.json with prioritized_context) + conflict_risk
|
||||
↓ Update: planning-notes.md (Context Findings + Consolidated Constraints)
|
||||
↓
|
||||
Phase 3: conflict-resolution [AUTO-TRIGGERED if conflict_risk ≥ medium]
|
||||
↓ Input: sessionId + contextPath + conflict_risk
|
||||
↓ CLI-powered conflict detection (JSON output)
|
||||
↓ AskUserQuestion: Present conflicts + resolution strategies
|
||||
↓ User selects strategies (or skip)
|
||||
↓ Apply modifications via Edit tool:
|
||||
↓ - Update guidance-specification.md
|
||||
↓ - Update role analyses (*.md)
|
||||
↓ - Mark context-package.json as "resolved"
|
||||
↓ Output: Modified brainstorm artifacts (NO report file)
|
||||
↓ Output: Modified brainstorm artifacts
|
||||
↓ Update: planning-notes.md (Conflict Decisions + Consolidated Constraints)
|
||||
↓ Skip if conflict_risk is none/low → proceed directly to Phase 4
|
||||
↓
|
||||
Phase 4: task-generate-agent --session sessionId [--cli-execute]
|
||||
↓ Input: sessionId + resolved brainstorm artifacts + session memory
|
||||
Phase 4: task-generate-agent --session sessionId
|
||||
↓ Input: sessionId + planning-notes.md + context-package.json + brainstorm artifacts
|
||||
↓ Output: IMPL_PLAN.md, task JSONs, TODO_LIST.md
|
||||
↓
|
||||
Return summary to user
|
||||
@@ -387,11 +571,6 @@ Return summary to user
|
||||
- Brainstorming artifacts (potentially modified by Phase 3)
|
||||
- Session-specific configuration
|
||||
|
||||
**Structured Description Benefits**:
|
||||
- **Clarity**: Clear separation of goal, scope, and context
|
||||
- **Consistency**: Same format across all phases
|
||||
- **Traceability**: Easy to track what was requested
|
||||
- **Precision**: Better context gathering and analysis
|
||||
|
||||
## Execution Flow Diagram
|
||||
|
||||
@@ -403,42 +582,42 @@ User triggers: /workflow:plan "Build authentication system"
Phase 1: Session Discovery
→ sessionId extracted
↓
Phase 2: Context Gathering (SlashCommand invoked)
→ ATTACH 3 tasks: ← ATTACHED
  - Phase 2.1: Analyze codebase structure
  - Phase 2.2: Identify integration points
  - Phase 2.3: Generate context package
→ Execute Phase 2.1-2.3
Phase 2: Context Gathering (SlashCommand executed)
→ ATTACH 3 sub-tasks: ← ATTACHED
  - → Analyze codebase structure
  - → Identify integration points
  - → Generate context package
→ Execute sub-tasks sequentially
→ COLLAPSE tasks ← COLLAPSED
→ contextPath + conflict_risk extracted
↓
Conditional Branch: Check conflict_risk
├─ IF conflict_risk ≥ medium:
│  Phase 3: Conflict Resolution (SlashCommand invoked)
│  → ATTACH 3 tasks: ← ATTACHED
│    - Phase 3.1: Detect conflicts with CLI analysis
│    - Phase 3.2: Present conflicts to user
│    - Phase 3.3: Apply resolution strategies
│  → Execute Phase 3.1-3.3
│  Phase 3: Conflict Resolution (SlashCommand executed)
│  → ATTACH 3 sub-tasks: ← ATTACHED
│    - → Detect conflicts with CLI analysis
│    - → Present conflicts to user
│    - → Apply resolution strategies
│  → Execute sub-tasks sequentially
│  → COLLAPSE tasks ← COLLAPSED
│
└─ ELSE: Skip Phase 3, proceed to Phase 4
↓
Phase 4: Task Generation (SlashCommand invoked)
→ ATTACH 3 tasks: ← ATTACHED
  - Phase 4.1: Discovery - analyze requirements
  - Phase 4.2: Planning - design tasks
  - Phase 4.3: Output - generate JSONs
→ Execute Phase 4.1-4.3
→ COLLAPSE tasks ← COLLAPSED
Phase 4: Task Generation (SlashCommand executed)
→ Single agent task (no sub-tasks)
→ Agent autonomously completes internally:
  (discovery → planning → output)
→ Outputs: IMPL_PLAN.md, IMPL-*.json, TODO_LIST.md
↓
Return summary to user
```

**Key Points**:
- **← ATTACHED**: Sub-tasks attached to TodoWrite when SlashCommand invoked
- **← COLLAPSED**: Sub-tasks collapsed to summary after completion
- **← ATTACHED**: Tasks attached to TodoWrite when SlashCommand executed
  - Phase 2, 3: Multiple sub-tasks
  - Phase 4: Single agent task
- **← COLLAPSED**: Sub-tasks collapsed to summary after completion (Phase 2, 3 only)
- **Phase 4**: Single agent task, no collapse (just mark completed)
- **Conditional Branch**: Phase 3 only executes if conflict_risk ≥ medium
- **Continuous Flow**: No user intervention between phases
@@ -458,11 +637,9 @@ Return summary to user
- Parse context path from Phase 2 output, store in memory
- **Extract conflict_risk from context-package.json**: Determine Phase 3 execution
- **If conflict_risk ≥ medium**: Launch Phase 3 conflict-resolution with sessionId and contextPath
- Wait for Phase 3 to finish executing (if executed), verify CONFLICT_RESOLUTION.md created
- Wait for Phase 3 to finish executing (if executed), verify conflict-resolution.json created
- **If conflict_risk is none/low**: Skip Phase 3, proceed directly to Phase 4
- **Build Phase 4 command**:
  - Base command: `/workflow:tools:task-generate-agent --session [sessionId]`
  - Add `--cli-execute` if flag present
- **Build Phase 4 command**: `/workflow:tools:task-generate-agent --session [sessionId]`
- Pass session ID to Phase 4 command
- Verify all Phase 4 outputs
- Update TodoWrite after each phase (dynamically adjust for Phase 3 presence)
@@ -509,6 +686,6 @@ CONSTRAINTS: [Limitations or boundaries]
- `/workflow:tools:task-generate-agent` - Phase 4: Generate task JSON files with agent-driven approach

**Follow-up Commands**:
- `/workflow:action-plan-verify` - Recommended: Verify plan quality and catch issues before execution
- `/workflow:plan-verify` - Recommended: Verify plan quality and catch issues before execution
- `/workflow:status` - Review task breakdown and current progress
- `/workflow:execute` - Begin implementation of generated tasks

648
.claude/commands/workflow/replan.md
Normal file
@@ -0,0 +1,648 @@
---
name: replan
description: Interactive workflow replanning with session-level artifact updates and boundary clarification through guided questioning
argument-hint: "[-y|--yes] [--session session-id] [task-id] \"requirements\"|file.md [--interactive]"
allowed-tools: Read(*), Write(*), Edit(*), TodoWrite(*), Glob(*), Bash(*)
---

# Workflow Replan Command

## Overview
Intelligently replans workflow sessions or individual tasks with interactive boundary clarification and comprehensive artifact updates.

**Core Capabilities**:
- **Session Replan**: Updates multiple artifacts (IMPL_PLAN.md, TODO_LIST.md, task JSONs)
- **Task Replan**: Focused updates within session context
- **Interactive Clarification**: Guided questioning to define modification boundaries
- **Impact Analysis**: Automatic detection of affected files and dependencies
- **Backup Management**: Preserves previous versions with restore capability

## Operation Modes

### Session Replan Mode

```bash
# Auto-detect active session
/workflow:replan "添加双因素认证支持"

# Explicit session
/workflow:replan --session WFS-oauth "添加双因素认证支持"

# File-based input
/workflow:replan --session WFS-oauth requirements-update.md

# Interactive mode
/workflow:replan --interactive
```

### Task Replan Mode

```bash
# Direct task update
/workflow:replan IMPL-1 "修改为使用 OAuth2.0 标准"

# Task with explicit session
/workflow:replan --session WFS-oauth IMPL-2 "增加单元测试覆盖率到 90%"

# Interactive mode
/workflow:replan IMPL-1 --interactive
```

## Execution Process

```
Input Parsing:
├─ Parse flags: --session, --interactive
└─ Detect mode: task-id present → Task mode | Otherwise → Session mode

Phase 1: Mode Detection & Session Discovery
├─ Detect operation mode (Task vs Session)
├─ Discover/validate session (--session flag or auto-detect)
└─ Load session context (workflow-session.json, IMPL_PLAN.md, TODO_LIST.md)

Phase 2: Interactive Requirement Clarification
└─ Decision (by mode):
   ├─ Session mode → 3-4 questions (scope, modules, changes, dependencies)
   └─ Task mode → 2 questions (update type, ripple effect)

Phase 3: Impact Analysis & Planning
├─ Analyze required changes
├─ Generate modification plan
└─ User confirmation (Execute / Adjust / Cancel)

Phase 4: Backup Creation
└─ Backup all affected files with manifest

Phase 5: Apply Modifications
├─ Update IMPL_PLAN.md (if needed)
├─ Update TODO_LIST.md (if needed)
├─ Update/Create/Delete task JSONs
└─ Update session metadata

Phase 6: Verification & Summary
├─ Validate consistency (JSON validity, task limits, acyclic dependencies)
└─ Generate change summary
```

## Execution Lifecycle

### Input Parsing

**Parse flags**:
```javascript
const sessionFlag = $ARGUMENTS.match(/--session\s+(\S+)/)?.[1]
const interactive = $ARGUMENTS.includes('--interactive')
const taskIdMatch = $ARGUMENTS.match(/\b(IMPL-\d+(?:\.\d+)?)\b/)
const taskId = taskIdMatch?.[1]
```

### Phase 1: Mode Detection & Session Discovery

**Process**:
1. **Detect Operation Mode**:
   - Check if task ID provided (IMPL-N or IMPL-N.M format) → Task mode
   - Otherwise → Session mode

2. **Discover/Validate Session**:
   - Use `--session` flag if provided
   - Otherwise auto-detect from `.workflow/active/`
   - Validate session exists

3. **Load Session Context**:
   - Read `workflow-session.json`
   - List existing tasks
   - Read `IMPL_PLAN.md` and `TODO_LIST.md`

4. **Parse Execution Intent** (from requirements text):

```javascript
// Dynamic tool detection from cli-tools.json
// Read enabled tools: ["gemini", "qwen", "codex", ...]
const enabledTools = loadEnabledToolsFromConfig(); // See ~/.claude/cli-tools.json

// Build dynamic patterns from enabled tools
function buildExecPatterns(tools) {
  const patterns = {
    agent: /改为\s*Agent\s*执行|使用\s*Agent\s*执行/i
  };
  tools.forEach(tool => {
    // Pattern: "使用 {tool} 执行" or "改用 {tool}"
    patterns[`cli_${tool}`] = new RegExp(
      `使用\\s*(${tool})\\s*执行|改用\\s*(${tool})`, 'i'
    );
  });
  return patterns;
}

const execPatterns = buildExecPatterns(enabledTools);

let executionIntent = null
for (const [key, pattern] of Object.entries(execPatterns)) {
  if (pattern.test(requirements)) {
    executionIntent = key.startsWith('cli_')
      ? { method: 'cli', cli_tool: key.replace('cli_', '') }
      : { method: 'agent', cli_tool: null }
    break
  }
}
```

**Output**: Session validated, context loaded, mode determined, **executionIntent parsed**
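
For instance, a requirement phrased like the examples at the end of this document resolves to a CLI intent (hypothetical input shown; the match relies on the patterns built above):

```javascript
// Hypothetical requirement text from the user
const requirements = '改用 codex'
// With "codex" in enabledTools, execPatterns.cli_codex matches "改用 codex",
// so the loop above yields:
// executionIntent = { method: 'cli', cli_tool: 'codex' }
```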

---

### Auto Mode Support

When `--yes` or `-y` flag is used, the command skips interactive clarification and uses safe defaults:

```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
```

**Auto Mode Defaults**:
- **Modification Scope**: `tasks_only` (safest - only update task details)
- **Affected Modules**: All modules related to the task
- **Task Changes**: `update_only` (no structural changes)
- **Dependency Changes**: `no` (preserve existing dependencies)
- **User Confirmation**: Auto-confirm execution

**Note**: The `--interactive` flag overrides `--yes` (forces interactive mode).
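
Expressed as a condition, this is the same check Phase 2 performs below:

```javascript
// --interactive wins over --yes: clarification is skipped only when --yes is given alone
const skipClarification = autoYes && !interactive
```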

---
### Phase 2: Interactive Requirement Clarification

**Purpose**: Define modification scope through guided questioning

**Auto Mode Check**:
```javascript
if (autoYes && !interactive) {
  // Use defaults and skip to Phase 3
  console.log(`[--yes] Using safe defaults for replan:`)
  console.log(`  - Scope: tasks_only`)
  console.log(`  - Changes: update_only`)
  console.log(`  - Dependencies: preserve existing`)

  userSelections = {
    scope: 'tasks_only',
    modules: 'all_affected',
    task_changes: 'update_only',
    dependency_changes: false
  }
  // Proceed to Phase 3
}
```
#### Session Mode Questions

**Q1: Modification Scope**
```javascript
Options:
- 仅更新任务细节 (tasks_only)
- 修改规划方案 (plan_update)
- 重构任务结构 (task_restructure)
- 全面重规划 (comprehensive)
```

**Q2: Affected Modules** (if scope >= plan_update)
```javascript
Options: Dynamically generated from existing tasks' focus_paths
- 认证模块 (src/auth)
- 用户管理 (src/user)
- 全部模块
```

**Q3: Task Changes** (if scope >= task_restructure)
```javascript
Options:
- 添加/删除任务 (add_remove)
- 合并/拆分任务 (merge_split)
- 仅更新内容 (update_only)
// Note: Max 4 options for AskUserQuestion
```

**Q4: Dependency Changes**
```javascript
Options:
- 是,需要重新梳理依赖
- 否,保持现有依赖
```

#### Task Mode Questions

**Q1: Update Type**
```javascript
Options:
- 需求和验收标准 (requirements & acceptance)
- 实现方案 (implementation_approach)
- 文件范围 (focus_paths)
- 依赖关系 (depends_on)
- 全部更新
```

**Q2: Ripple Effect**
```javascript
Options:
- 是,需要同步更新依赖任务
- 否,仅影响当前任务
- 不确定,请帮我分析
```

**Output**: User selections stored, modification boundaries defined

---
### Phase 3: Impact Analysis & Planning

**Step 3.1: Analyze Required Changes**

Determine affected files based on clarification:

```typescript
interface ImpactAnalysis {
  affected_files: {
    impl_plan: boolean;
    todo_list: boolean;
    session_meta: boolean;
    tasks: string[];
  };

  operations: {
    type: 'create' | 'update' | 'delete' | 'merge' | 'split';
    target: string;
    reason: string;
  }[];

  backup_strategy: {
    timestamp: string;
    files: string[];
  };
}
```
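
As a concrete illustration, a populated `ImpactAnalysis` for the two-factor-authentication example used later in this document might look like this (all values hypothetical):

```javascript
// Hypothetical ImpactAnalysis instance for an "add 2FA support" session replan
const analysis = {
  affected_files: {
    impl_plan: true,          // technical approach section changes
    todo_list: true,          // task list gains/loses entries
    session_meta: true,       // task counts change
    tasks: ['IMPL-001', 'IMPL-003']
  },
  operations: [
    { type: 'create', target: 'IMPL-004.json', reason: '2FA implementation task' },
    { type: 'update', target: 'IMPL-001.json', reason: 'add 2FA preparation work' },
    { type: 'delete', target: 'IMPL-003.json', reason: 'superseded by new approach' }
  ],
  backup_strategy: {
    timestamp: '2025-01-25T14-36-00',
    files: ['IMPL_PLAN.md', 'TODO_LIST.md', 'workflow-session.json', 'IMPL-001.json', 'IMPL-003.json']
  }
};
```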

**Step 3.2: Generate Modification Plan**

```markdown
## 修改计划

### 影响范围
- [ ] IMPL_PLAN.md: 更新技术方案第 3 节
- [ ] TODO_LIST.md: 添加 2 个新任务,删除 1 个废弃任务
- [ ] IMPL-001.json: 更新实现方案
- [ ] workflow-session.json: 更新任务计数

### 变更操作
1. **创建**: IMPL-004.json (双因素认证实现)
2. **更新**: IMPL-001.json (添加 2FA 准备工作)
3. **删除**: IMPL-003.json (已被新方案替代)
```

**Step 3.3: User Confirmation**

```javascript
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes) {
  // Auto mode: Auto-confirm execution
  console.log(`[--yes] Auto-confirming replan execution`)
  userConfirmation = '确认执行'
  // Proceed to Phase 4
} else {
  // Interactive mode: Ask user
  AskUserQuestion({
    questions: [{
      question: "修改计划已生成,请确认操作:",
      header: "Confirm",
      options: [
        { label: "确认执行", description: "开始应用所有修改" },
        { label: "调整计划", description: "重新回答问题调整范围" },
        { label: "取消操作", description: "放弃本次重规划" }
      ],
      multiSelect: false
    }]
  })
}
```

**Output**: Modification plan confirmed or adjusted

---
### Phase 4: Backup Creation

**Process**:

1. **Create Backup Directory**:
   ```bash
   timestamp=$(date -u +"%Y-%m-%dT%H-%M-%S")
   backup_dir=".workflow/active/$SESSION_ID/.process/backup/replan-$timestamp"
   mkdir -p "$backup_dir"
   ```

2. **Backup All Affected Files**:
   - IMPL_PLAN.md
   - TODO_LIST.md
   - workflow-session.json
   - Affected task JSONs

3. **Create Backup Manifest**:
   ```markdown
   # Replan Backup Manifest

   **Timestamp**: {timestamp}
   **Reason**: {replan_reason}
   **Scope**: {modification_scope}

   ## Restoration Command
   cp {backup_dir}/* .workflow/active/{session}/
   ```

**Output**: All files safely backed up with manifest

---
### Phase 5: Apply Modifications

**Step 5.1: Update IMPL_PLAN.md** (if needed)

Use Edit tool to modify specific sections:
- Update affected technical sections
- Update modification date

**Step 5.2: Update TODO_LIST.md** (if needed)

- Add new tasks with `[ ]` checkbox
- Mark deleted tasks as `[x] ~~task~~ (已废弃)`
- Update modified task descriptions

**Step 5.3: Update Task JSONs**

For each affected task:
```typescript
const updated_task = {
  ...task,
  context: {
    ...task.context,
    requirements: [...updated_requirements],
    acceptance: [...updated_acceptance]
  },
  flow_control: {
    ...task.flow_control,
    implementation_approach: [...updated_steps]
  },
  // Update execution config if intent detected
  ...(executionIntent && {
    meta: {
      ...task.meta,
      execution_config: {
        method: executionIntent.method,
        cli_tool: executionIntent.cli_tool,
        enable_resume: executionIntent.method !== 'agent'
      }
    }
  })
};

Write({
  file_path: `.workflow/active/${SESSION_ID}/.task/${task_id}.json`,
  content: JSON.stringify(updated_task, null, 2)
});
```

**Note**: Implementation approach steps are NO LONGER modified. CLI execution is controlled by task-level `meta.execution_config` only.
**Step 5.4: Create New Tasks** (if needed)

Generate complete task JSON with all required fields:
- id, title, status
- meta (type, agent)
- context (requirements, focus_paths, acceptance)
- flow_control (pre_analysis, implementation_approach, target_files)

**Step 5.5: Delete Obsolete Tasks** (if needed)

Move to backup instead of hard delete:
```bash
mv ".workflow/active/$SESSION_ID/.task/{task-id}.json" "$backup_dir/"
```

**Step 5.6: Update Session Metadata**

Update workflow-session.json:
- progress.current_tasks
- progress.last_replan
- replan_history array

**Output**: All modifications applied, artifacts updated

---
### Phase 6: Verification & Summary

**Step 6.1: Verify Consistency**

1. Validate all task JSONs are valid JSON
2. Check task count within limits (max 10)
3. Verify dependency graph is acyclic (see the sketch below)
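
A minimal sketch of the acyclic check, assuming each task JSON exposes a `depends_on` array of task IDs (the helper below is illustrative, not part of the command spec):

```javascript
// Depth-first cycle check over task depends_on edges
function assertAcyclic(tasks) {
  const state = {}; // task id -> 'visiting' | 'done'
  const byId = Object.fromEntries(tasks.map(t => [t.id, t]));

  function visit(id, path) {
    if (state[id] === 'done') return;
    if (state[id] === 'visiting') {
      throw new Error(`Circular dependency detected: ${[...path, id].join(' -> ')}`);
    }
    state[id] = 'visiting';
    for (const dep of byId[id]?.depends_on ?? []) visit(dep, [...path, id]);
    state[id] = 'done';
  }

  tasks.forEach(t => visit(t.id, []));
}
```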

**Step 6.2: Generate Change Summary**

```markdown
## 重规划完成

### 会话信息
- **Session**: {session-id}
- **时间**: {timestamp}
- **备份**: {backup-path}

### 变更摘要
**范围**: {scope}
**原因**: {reason}

### 修改的文件
- ✓ IMPL_PLAN.md: {changes}
- ✓ TODO_LIST.md: {changes}
- ✓ Task JSONs: {count} files updated

### 任务变更
- **新增**: {task-ids}
- **删除**: {task-ids}
- **更新**: {task-ids}

### 回滚方法
cp {backup-path}/* .workflow/active/{session}/
```

**Output**: Summary displayed, replan complete

---
## TodoWrite Progress Tracking

### Session Mode Progress

```json
[
  {"content": "检测模式和发现会话", "status": "completed", "activeForm": "检测模式和发现会话"},
  {"content": "交互式需求明确", "status": "completed", "activeForm": "交互式需求明确"},
  {"content": "影响分析和计划生成", "status": "completed", "activeForm": "影响分析和计划生成"},
  {"content": "创建备份", "status": "completed", "activeForm": "创建备份"},
  {"content": "更新会话产出文件", "status": "completed", "activeForm": "更新会话产出文件"},
  {"content": "验证一致性", "status": "completed", "activeForm": "验证一致性"}
]
```

### Task Mode Progress

```json
[
  {"content": "检测会话和加载任务", "status": "completed", "activeForm": "检测会话和加载任务"},
  {"content": "交互式更新确认", "status": "completed", "activeForm": "交互式更新确认"},
  {"content": "应用任务修改", "status": "completed", "activeForm": "应用任务修改"}
]
```
## Error Handling

### Session Errors

```bash
# No active session found
ERROR: No active session found
Run /workflow:session:start to create a session

# Session not found
ERROR: Session WFS-invalid not found
Available sessions: [list]

# No changes specified
WARNING: No modifications specified
Use --interactive mode or provide requirements
```

### Task Errors

```bash
# Task not found
ERROR: Task IMPL-999 not found in session
Available tasks: [list]

# Task completed
WARNING: Task IMPL-001 is completed
Consider creating new task for additional work

# Circular dependency
ERROR: Circular dependency detected
Resolve dependency conflicts before proceeding
```

### Validation Errors

```bash
# Task limit exceeded
ERROR: Replan would create 12 tasks (limit: 10)
Consider: combining tasks, splitting sessions, or removing tasks

# Invalid JSON
ERROR: Generated invalid JSON
Backup preserved, rolling back changes
```
## File Structure

```
.workflow/active/WFS-session-name/
├── workflow-session.json
├── IMPL_PLAN.md
├── TODO_LIST.md
├── .task/
│   ├── IMPL-001.json
│   ├── IMPL-002.json
│   └── IMPL-003.json
└── .process/
    ├── context-package.json
    └── backup/
        └── replan-{timestamp}/
            ├── MANIFEST.md
            ├── IMPL_PLAN.md
            ├── TODO_LIST.md
            ├── workflow-session.json
            └── IMPL-*.json
```
## Examples

### Session Replan - Add Feature

```bash
/workflow:replan "添加双因素认证支持"

# Interactive clarification
Q: 修改范围?
A: 全面重规划

Q: 受影响模块?
A: 认证模块, API接口

Q: 任务变更?
A: 添加新任务, 更新内容

# Execution
✓ 创建备份
✓ 更新 IMPL_PLAN.md
✓ 更新 TODO_LIST.md
✓ 创建 IMPL-004.json
✓ 更新 IMPL-001.json, IMPL-002.json

重规划完成! 新增 1 任务,更新 2 任务
```

### Task Replan - Update Requirements

```bash
/workflow:replan IMPL-001 "支持 OAuth2.0 标准"

# Interactive clarification
Q: 更新部分?
A: 需求和验收标准, 实现方案

Q: 影响其他任务?
A: 是,需要同步更新依赖任务

# Execution
✓ 创建备份
✓ 更新 IMPL-001.json
✓ 更新 IMPL-002.json (依赖任务)

任务重规划完成! 更新 2 个任务
```

### Task Replan - Change Execution Method

```bash
/workflow:replan IMPL-001 "改用 Codex 执行"

# Semantic parsing detects executionIntent:
# { method: 'cli', cli_tool: 'codex' }

# Execution (no interactive questions needed)
✓ 创建备份
✓ 更新 IMPL-001.json
  - meta.execution_config = { method: 'cli', cli_tool: 'codex', enable_resume: true }

任务执行方式已更新: Agent → CLI (codex)
```

```bash
/workflow:replan IMPL-002 "改为 Agent 执行"

# Semantic parsing detects executionIntent:
# { method: 'agent', cli_tool: null }

# Execution
✓ 创建备份
✓ 更新 IMPL-002.json
  - meta.execution_config = { method: 'agent', cli_tool: null }

任务执行方式已更新: CLI → Agent
```
@@ -1,105 +0,0 @@

---
name: resume
description: Resume paused workflow session with automatic progress analysis, pending task identification, and conflict detection
argument-hint: "session-id for workflow session to resume"
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*)
---

# Sequential Workflow Resume Command

## Usage
```bash
/workflow:resume "<session-id>"
```

## Purpose
**Sequential command coordination for workflow resumption**: first analyze the current session status, then continue execution with special resume context. This command orchestrates intelligent session resumption through a two-step process.

## Command Coordination Workflow

### Phase 1: Status Analysis
1. **Call status command**: Execute `/workflow:status` to analyze current session state
2. **Verify session information**: Check session ID, progress, and current task status
3. **Identify resume point**: Determine where workflow was interrupted

### Phase 2: Resume Execution
1. **Call execute with resume flag**: Execute `/workflow:execute --resume-session="{session-id}"`
2. **Pass session context**: Provide analyzed session information to execute command
3. **Direct agent execution**: Skip discovery phase, directly enter TodoWrite and agent execution

## Implementation Protocol

### Sequential Command Execution
```bash
# Phase 1: Analyze current session status
SlashCommand(command="/workflow:status")

# Phase 2: Resume execution with special flag
SlashCommand(command="/workflow:execute --resume-session=\"{session-id}\"")
```
### Progress Tracking
```javascript
TodoWrite({
  todos: [
    {
      content: "Analyze current session status and progress",
      status: "in_progress",
      activeForm: "Analyzing session status"
    },
    {
      content: "Resume workflow execution with session context",
      status: "pending",
      activeForm: "Resuming workflow execution"
    }
  ]
});
```
## Resume Information Flow

### Status Analysis Results
The `/workflow:status` command provides:
- **Session ID**: Current active session identifier
- **Current Progress**: Completed, in-progress, and pending tasks
- **Interruption Point**: Last executed task and next pending task
- **Session State**: Overall workflow status

### Execute Command Context
The special `--resume-session` flag tells `/workflow:execute`:
- **Skip Discovery**: Don't search for sessions, use provided session ID
- **Direct Execution**: Go straight to TodoWrite generation and agent launching
- **Context Restoration**: Use existing session state and summaries
- **Resume Point**: Continue from identified interruption point

## Error Handling

### Session Validation Failures
- **Session not found**: Report missing session, suggest available sessions
- **Session inactive**: Recommend activating session first
- **Status command fails**: Retry once, then report analysis failure

### Execute Resumption Failures
- **No pending tasks**: Report workflow completion status
- **Execute command fails**: Report resumption failure, suggest manual intervention

## Success Criteria
1. **Status analysis complete**: Session state properly analyzed and reported
2. **Execute command launched**: Resume execution started with proper context
3. **Agent coordination**: TodoWrite and agent execution initiated successfully
4. **Context preservation**: Session state and progress properly maintained

## Related Commands

**Prerequisite Commands**:
- `/workflow:plan` or `/workflow:execute` - Workflow must be in progress or paused

**Called by This Command** (2 phases):
- `/workflow:status` - Phase 1: Analyze current session status and identify resume point
- `/workflow:execute` - Phase 2: Resume execution with `--resume-session` flag

**Follow-up Commands**:
- None - Workflow continues automatically via `/workflow:execute`

---
*Sequential command coordination for workflow session resumption*
608
.claude/commands/workflow/review-cycle-fix.md
Normal file
@@ -0,0 +1,608 @@

---
name: review-cycle-fix
description: Automated fixing of code review findings with AI-powered planning and coordinated execution. Uses intelligent grouping, multi-stage timeline coordination, and test-driven verification.
argument-hint: "<export-file|review-dir> [--resume] [--max-iterations=N]"
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*), Task(*), Edit(*), Write(*)
---

# Workflow Review-Cycle-Fix Command

## Quick Start

```bash
# Fix from exported findings file (session-based path)
/workflow:review-cycle-fix .workflow/active/WFS-123/.review/fix-export-1706184622000.json

# Fix from review directory (auto-discovers latest export)
/workflow:review-cycle-fix .workflow/active/WFS-123/.review/

# Resume interrupted fix session
/workflow:review-cycle-fix --resume

# Custom max retry attempts per finding
/workflow:review-cycle-fix .workflow/active/WFS-123/.review/ --max-iterations=5
```

**Fix Source**: Exported findings from review cycle dashboard
**Output Directory**: `{review-dir}/fixes/{fix-session-id}/` (within session .review/)
**Default Max Iterations**: 3 (per finding, adjustable)
**CLI Tools**: @cli-planning-agent (planning), @cli-execute-agent (fixing)

## What & Why

### Core Concept
Automated fix orchestrator with **two-phase architecture**: AI-powered planning followed by coordinated parallel/serial execution. Generates a fix timeline with intelligent grouping and dependency analysis, then executes fixes with conservative test verification.

**Fix Process**:
- **Planning Phase**: AI analyzes findings, generates fix plan with grouping and execution strategy
- **Execution Phase**: Main orchestrator coordinates agents per timeline stages
- **No rigid structure**: Adapts to task requirements, not bound to a fixed JSON format

**vs Manual Fixing**:
- **Manual**: Developer reviews findings one-by-one, fixes sequentially
- **Automated**: AI groups related issues, executes in optimal parallel/serial order with automatic test verification

### Value Proposition
1. **Intelligent Planning**: AI-powered analysis identifies optimal grouping and execution strategy
2. **Multi-stage Coordination**: Supports complex parallel + serial execution with dependency management
3. **Conservative Safety**: Mandatory test verification with automatic rollback on failure
4. **Resume Support**: Checkpoint-based recovery for interrupted sessions

### Orchestrator Boundary (CRITICAL)
- **ONLY command** for automated review finding fixes
- Manages: Planning phase coordination, stage-based execution, agent scheduling, progress tracking
- Delegates: Fix planning to @cli-planning-agent, fix execution to @cli-execute-agent
### Execution Flow

```
Phase 1: Discovery & Initialization
└─ Validate export file, create fix session structure, initialize state files

Phase 2: Planning Coordination (@cli-planning-agent)
├─ Analyze findings for patterns and dependencies
├─ Group by file + dimension + root cause similarity
├─ Determine execution strategy (parallel/serial/hybrid)
├─ Generate fix timeline with stages
└─ Output: fix-plan.json

Phase 3: Execution Orchestration (Stage-based)
For each timeline stage:
├─ Load groups for this stage
├─ If parallel: Launch all group agents simultaneously
├─ If serial: Execute groups sequentially
├─ Each agent:
│  ├─ Analyze code context
│  ├─ Apply fix per strategy
│  ├─ Run affected tests
│  ├─ On test failure: Rollback, retry up to max_iterations
│  └─ On success: Commit, update fix-progress-{N}.json
└─ Advance to next stage

Phase 4: Completion & Aggregation
└─ Aggregate results → Generate fix-summary.md → Update history → Output summary

Phase 5: Session Completion (Optional)
└─ If all fixes successful → Prompt to complete workflow session
```

### Agent Roles

| Agent | Responsibility |
|-------|---------------|
| **Orchestrator** | Input validation, session management, planning coordination, stage-based execution scheduling, progress tracking, aggregation |
| **@cli-planning-agent** | Findings analysis, intelligent grouping (file+dimension+root cause), execution strategy determination (parallel/serial/hybrid), timeline generation with dependency mapping |
| **@cli-execute-agent** | Fix execution per group, code context analysis, Edit tool operations, test verification, git rollback on failure, completion JSON generation |
## Enhanced Features

### 1. Two-Phase Architecture

**Phase Separation**:

| Phase | Agent | Output | Purpose |
|-------|-------|--------|---------|
| **Planning** | @cli-planning-agent | fix-plan.json | Analyze findings, group intelligently, determine optimal execution strategy |
| **Execution** | @cli-execute-agent | completions/*.json | Execute fixes per plan with test verification and rollback |

**Benefits**:
- Clear separation of concerns (analysis vs execution)
- Reusable plans (can re-execute without re-planning)
- Better error isolation (planning failures vs execution failures)

### 2. Intelligent Grouping Strategy

**Three-Level Grouping**:

```javascript
// Level 1: Primary grouping by file + dimension
{file: "auth.ts", dimension: "security"} → Group A
{file: "auth.ts", dimension: "quality"} → Group B
{file: "query-builder.ts", dimension: "security"} → Group C

// Level 2: Secondary grouping by root cause similarity
Group A findings → Semantic similarity analysis (threshold 0.7)
→ Sub-group A1: "missing-input-validation" (findings 1, 2)
→ Sub-group A2: "insecure-crypto" (finding 3)

// Level 3: Dependency analysis
Sub-group A1 creates validation utilities
Sub-group C4 depends on those utilities
→ A1 must execute before C4 (serial stage dependency)
```

**Similarity Computation**:
- Combine: `description + recommendation + category`
- Vectorize: TF-IDF or LLM embedding
- Cluster: Greedy algorithm with cosine similarity > 0.7 (a clustering sketch follows this list)
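
A minimal sketch of the greedy clustering step, assuming finding texts have already been vectorized (the 0.7 threshold mirrors the description above; everything else is illustrative):

```javascript
// Greedy clustering: each finding joins the first cluster whose seed is similar enough
function cosine(a, b) {
  let dot = 0, na = 0, nb = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i]; na += a[i] * a[i]; nb += b[i] * b[i];
  }
  return dot / (Math.sqrt(na) * Math.sqrt(nb) || 1);
}

function greedyCluster(findings, threshold = 0.7) {
  const clusters = []; // each: { seed: vector, members: finding[] }
  for (const f of findings) {
    const home = clusters.find(c => cosine(c.seed, f.vector) > threshold);
    if (home) home.members.push(f);
    else clusters.push({ seed: f.vector, members: [f] });
  }
  return clusters;
}
```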

### 3. Execution Strategy Determination

**Strategy Types**:

| Strategy | When to Use | Stage Structure |
|----------|-------------|-----------------|
| **Parallel** | All groups independent, different files | Single stage, all groups in parallel |
| **Serial** | Strong dependencies, shared resources | Multiple stages, one group per stage |
| **Hybrid** | Mixed dependencies | Multiple stages, parallel within stages |

**Dependency Detection**:
- Shared file modifications
- Utility creation + usage patterns
- Test dependency chains
- Risk level clustering (high-risk groups isolated); a strategy-selection sketch follows this list
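
For illustration, strategy selection can reduce to checking for inter-group dependencies and shared files (the group shape and helper below are assumptions, not the fix-plan schema):

```javascript
// Pick an execution strategy from detected group relationships (illustrative)
function chooseStrategy(groups) {
  const sharesFiles = (a, b) => a.files.some(f => b.files.includes(f));
  let dependentPairs = 0;
  for (let i = 0; i < groups.length; i++) {
    for (let j = i + 1; j < groups.length; j++) {
      if (sharesFiles(groups[i], groups[j]) ||
          groups[j].depends_on?.includes(groups[i].group_id)) {
        dependentPairs++;
      }
    }
  }
  if (dependentPairs === 0) return 'parallel';              // fully independent groups
  if (dependentPairs >= groups.length - 1) return 'serial'; // effectively chained
  return 'hybrid';                                          // stage only the dependent groups
}
```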

### 4. Conservative Test Verification

**Test Strategy** (per fix):

```javascript
// 1. Identify affected tests
const testPattern = identifyTestPattern(finding.file);
// e.g., "tests/auth/**/*.test.*" for src/auth/service.ts

// 2. Run tests
const result = await runTests(testPattern);

// 3. Evaluate
if (result.passRate < 1.0) {  // anything below a 100% pass rate fails the fix
  // Rollback
  await gitCheckout(finding.file);

  // Retry with failure context
  if (attempts < maxIterations) {
    const fixContext = analyzeFailure(result.stderr);
    regenerateFix(finding, fixContext);
    retry();
  } else {
    markFailed(finding.id);
  }
} else {
  // Commit
  await gitCommit(`Fix: ${finding.title} [${finding.id}]`);
  markFixed(finding.id);
}
```

**Pass Criteria**: 100% test pass rate (no partial fixes)

## Core Responsibilities

### Orchestrator

**Phase 1: Discovery & Initialization**
- Input validation: Check export file exists and is valid JSON
- Auto-discovery: If review-dir provided, find latest `*-fix-export.json`
- Session creation: Generate fix-session-id (`fix-{timestamp}`)
- Directory structure: Create `{review-dir}/fixes/{fix-session-id}/` with subdirectories
- State files: Initialize active-fix-session.json (session marker)
- TodoWrite initialization: Set up 4-phase tracking

**Phase 2: Planning Coordination**
- Launch @cli-planning-agent with findings data and project context
- Validate fix-plan.json output (schema conformance, includes metadata with session status)
- Load plan into memory for execution phase
- TodoWrite update: Mark planning complete, start execution

**Phase 3: Execution Orchestration**
- Load fix-plan.json timeline stages
- For each stage:
  - If parallel mode: Launch all group agents via `Promise.all()`
  - If serial mode: Execute groups sequentially with `await`
  - Assign agent IDs (agents update their fix-progress-{N}.json)
  - Handle agent failures gracefully (mark group as failed, continue)
  - Advance to next stage only when current stage complete (a scheduling sketch follows this list)
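
A minimal sketch of that scheduling loop, assuming each timeline stage lists its group IDs and an `executeGroup` helper wraps the per-group agent launch (helper names are hypothetical):

```javascript
// Stage-based scheduling: parallel stages fan out, serial stages await each group
async function runTimeline(plan) {
  for (const stage of plan.timeline.stages) {
    const groups = stage.groups.map(id => plan.groups.find(g => g.group_id === id));
    if (stage.execution_mode === 'parallel') {
      // Launch every group agent in this stage simultaneously
      await Promise.all(groups.map(g =>
        executeGroup(g).catch(err => markGroupFailed(g, err))
      ));
    } else {
      // Serial stage: one group at a time
      for (const g of groups) {
        await executeGroup(g).catch(err => markGroupFailed(g, err));
      }
    }
    // Advance only when the whole stage has settled
  }
}
```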

**Phase 4: Completion & Aggregation**
- Collect final status from all fix-progress-{N}.json files
- Generate fix-summary.md with timeline and results
- Update fix-history.json with new session entry
- Remove active-fix-session.json
- TodoWrite completion: Mark all phases done
- Output summary to user

**Phase 5: Session Completion (Optional)**
- If all findings fixed successfully (no failures):
  - Prompt user: "All fixes complete. Complete workflow session? [Y/n]"
  - If confirmed: Execute `/workflow:session:complete` to archive session with lessons learned
- If partial success (some failures):
  - Output: "Some findings failed. Review fix-summary.md before completing session."
  - Do NOT auto-complete session

### Output File Structure

```
.workflow/active/WFS-{session-id}/.review/
├── fix-export-{timestamp}.json        # Exported findings (input)
└── fixes/{fix-session-id}/
    ├── fix-plan.json                  # Planning agent output (execution plan with metadata)
    ├── fix-progress-1.json            # Group 1 progress (planning agent init → agent updates)
    ├── fix-progress-2.json            # Group 2 progress (planning agent init → agent updates)
    ├── fix-progress-3.json            # Group 3 progress (planning agent init → agent updates)
    ├── fix-summary.md                 # Final report (orchestrator generates)
    ├── active-fix-session.json        # Active session marker
    └── fix-history.json               # All sessions history
```

**File Producers**:
- **Planning Agent**: `fix-plan.json` (with metadata), all `fix-progress-*.json` (initial state)
- **Execution Agents**: Update assigned `fix-progress-{N}.json` in real-time
### Agent Invocation Template

**Planning Agent**:
```javascript
Task({
  subagent_type: "cli-planning-agent",
  description: `Generate fix plan and initialize progress files for ${findings.length} findings`,
  prompt: `
## Task Objective
Analyze ${findings.length} code review findings and generate execution plan with intelligent grouping and timeline coordination.

## Input Data
Review Session: ${reviewId}
Fix Session ID: ${fixSessionId}
Total Findings: ${findings.length}

Findings:
${JSON.stringify(findings, null, 2)}

Project Context:
- Structure: ${projectStructure}
- Test Framework: ${testFramework}
- Git Status: ${gitStatus}

## Output Requirements

### 1. fix-plan.json
Execute: cat ~/.claude/workflows/cli-templates/fix-plan-template.json

Generate execution plan following template structure:

**Key Generation Rules**:
- **Metadata**: Populate fix_session_id, review_session_id, status ("planning"), created_at, started_at timestamps
- **Execution Strategy**: Choose approach (parallel/serial/hybrid) based on dependency analysis, set parallel_limit and stages count
- **Groups**: Create groups (G1, G2, ...) with intelligent grouping (see Analysis Requirements below), assign progress files (fix-progress-1.json, ...), populate fix_strategy with approach/complexity/test_pattern, assess risks, identify dependencies
- **Timeline**: Define stages respecting dependencies, set execution_mode per stage, map groups to stages, calculate critical path

### 2. fix-progress-{N}.json (one per group)
Execute: cat ~/.claude/workflows/cli-templates/fix-progress-template.json

For each group (G1, G2, G3, ...), generate fix-progress-{N}.json following template structure:

**Initial State Requirements**:
- Status: "pending", phase: "waiting"
- Timestamps: Set last_update to now, others null
- Findings: Populate from review findings with status "pending", all operation fields null
- Summary: Initialize all counters to zero
- Flow control: Empty implementation_approach array
- Errors: Empty array

**CRITICAL**: Ensure complete template structure - all fields must be present.

## Analysis Requirements

### Intelligent Grouping Strategy
Group findings using these criteria (in priority order):

1. **File Proximity**: Findings in same file or related files
2. **Dimension Affinity**: Same dimension (security, performance, etc.)
3. **Root Cause Similarity**: Similar underlying issues
4. **Fix Approach Commonality**: Can be fixed with similar approach

**Grouping Guidelines**:
- Optimal group size: 2-5 findings per group
- Avoid cross-cutting concerns in same group
- Consider test isolation (different test suites → different groups)
- Balance workload across groups for parallel execution

### Execution Strategy Determination

**Parallel Mode**: Use when groups are independent, no shared files
**Serial Mode**: Use when groups have dependencies or shared resources
**Hybrid Mode**: Use for mixed dependency graphs (recommended for most cases)

**Dependency Analysis**:
- Identify shared files between groups
- Detect test dependency chains
- Evaluate risk of concurrent modifications

### Risk Assessment

For each group, evaluate:
- **Complexity**: Based on code structure, file size, existing tests
- **Impact Scope**: Number of files affected, API surface changes
- **Rollback Feasibility**: Ease of reverting changes if tests fail

### Test Strategy

For each group, determine:
- **Test Pattern**: Glob pattern matching affected tests
- **Pass Criteria**: All tests must pass (100% pass rate)
- **Test Command**: Infer from project (package.json, pytest.ini, etc.)

## Output Files

Write to ${sessionDir}:
- ./fix-plan.json
- ./fix-progress-1.json
- ./fix-progress-2.json
- ./fix-progress-{N}.json (as many as groups created)

## Quality Checklist

Before finalizing outputs:
- ✅ All findings assigned to exactly one group
- ✅ Group dependencies correctly identified
- ✅ Timeline stages respect dependencies
- ✅ All progress files have complete initial structure
- ✅ Test patterns are valid and specific
- ✅ Risk assessments are realistic
- ✅ Estimated times are reasonable
`
})
```

**Execution Agent** (per group):
```javascript
Task({
  subagent_type: "cli-execute-agent",
  description: `Fix ${group.findings.length} issues: ${group.group_name}`,
  prompt: `
## Task Objective
Execute fixes for code review findings in group ${group.group_id}. Update progress file in real-time with flow control tracking.

## Assignment
- Group ID: ${group.group_id}
- Group Name: ${group.group_name}
- Progress File: ${sessionDir}/${group.progress_file}
- Findings Count: ${group.findings.length}
- Max Iterations: ${maxIterations} (per finding)

## Fix Strategy
${JSON.stringify(group.fix_strategy, null, 2)}

## Risk Assessment
${JSON.stringify(group.risk_assessment, null, 2)}

## Execution Flow

### Initialization (Before Starting)

1. Read ${group.progress_file} to load initial state
2. Update progress file:
   - assigned_agent: "${agentId}"
   - status: "in-progress"
   - started_at: Current ISO 8601 timestamp
   - last_update: Current ISO 8601 timestamp
3. Write updated state back to ${group.progress_file}

### Main Execution Loop

For EACH finding in ${group.progress_file}.findings:

#### Step 1: Analyze Context

**Before Step**:
- Update finding: status→"in-progress", started_at→now()
- Update current_finding: Populate with finding details, status→"analyzing", action→"Reading file and understanding code structure"
- Update phase→"analyzing"
- Update flow_control: Add "analyze_context" step to implementation_approach (status→"in-progress"), set current_step→"analyze_context"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Read file: finding.file
- Understand code structure around line: finding.line
- Analyze surrounding context (imports, dependencies, related functions)
- Review recommendations: finding.recommendations

**After Step**:
- Update flow_control: Mark "analyze_context" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}

#### Step 2: Apply Fix

**Before Step**:
- Update current_finding: status→"fixing", action→"Applying code changes per recommendations"
- Update phase→"fixing"
- Update flow_control: Add "apply_fix" step to implementation_approach (status→"in-progress"), set current_step→"apply_fix"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Use Edit tool to implement code changes per finding.recommendations
- Follow fix_strategy.approach
- Maintain code style and existing patterns

**After Step**:
- Update flow_control: Mark "apply_fix" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}

#### Step 3: Test Verification

**Before Step**:
- Update current_finding: status→"testing", action→"Running test suite to verify fix"
- Update phase→"testing"
- Update flow_control: Add "run_tests" step to implementation_approach (status→"in-progress"), set current_step→"run_tests"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Run tests using fix_strategy.test_pattern
- Require 100% pass rate
- Capture test output

**On Test Failure**:
- Git rollback: \`git checkout -- \${finding.file}\`
- Increment finding.attempts
- Update flow_control: Mark "run_tests" step as "failed" with completed_at→now()
- Update errors: Add entry (finding_id, error_type→"test_failure", message, timestamp)
- If finding.attempts < ${maxIterations}:
  - Reset flow_control: implementation_approach→[], current_step→null
  - Retry from Step 1
- Else:
  - Update finding: status→"completed", result→"failed", error_message→"Max iterations reached", completed_at→now()
  - Update summary counts, move to next finding

**On Test Success**:
- Update flow_control: Mark "run_tests" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}
- Proceed to Step 4

#### Step 4: Commit Changes

**Before Step**:
- Update current_finding: status→"committing", action→"Creating git commit for successful fix"
- Update phase→"committing"
- Update flow_control: Add "commit_changes" step to implementation_approach (status→"in-progress"), set current_step→"commit_changes"
- Update last_update→now(), write to ${group.progress_file}

**Action**:
- Git commit: \`git commit -m "fix(${finding.dimension}): ${finding.title} [${finding.id}]"\`
- Capture commit hash

**After Step**:
- Update finding: status→"completed", result→"fixed", commit_hash→<captured>, test_passed→true, completed_at→now()
- Update flow_control: Mark "commit_changes" step as "completed" with completed_at→now()
- Update last_update→now(), write to ${group.progress_file}

#### After Each Finding

- Update summary: Recalculate counts (pending/in_progress/fixed/failed) and percent_complete
- If all findings completed: Clear current_finding, reset flow_control
- Update last_update→now(), write to ${group.progress_file}

### Final Completion

When all findings processed:
- Update status→"completed", phase→"done", summary.percent_complete→100.0
- Update last_update→now(), write final state to ${group.progress_file}

## Critical Requirements

### Progress File Updates
- **MUST update after every significant action** (before/after each step)
- **Always maintain complete structure** - never write partial updates
- **Use ISO 8601 timestamps** - e.g., "2025-01-25T14:36:00Z"

### Flow Control Format
Follow action-planning-agent flow_control.implementation_approach format:
- step: Identifier (e.g., "analyze_context", "apply_fix")
- action: Human-readable description
- status: "pending" | "in-progress" | "completed" | "failed"
- started_at: ISO 8601 timestamp or null
- completed_at: ISO 8601 timestamp or null

### Error Handling
- Capture all errors in errors[] array
- Never leave progress file in invalid state
- Always write complete updates, never partial
- On unrecoverable error: Mark group as failed, preserve state

## Test Patterns
Use fix_strategy.test_pattern to run affected tests:
- Pattern: ${group.fix_strategy.test_pattern}
- Command: Infer from project (npm test, pytest, etc.)
- Pass Criteria: 100% pass rate required
`
})
```

### Error Handling

**Planning Failures**:
- Invalid template → Abort with error message
- Insufficient findings data → Request complete export
- Planning timeout → Retry once, then fail gracefully

**Execution Failures**:
- Agent crash → Mark group as failed, continue with other groups
- Test command not found → Skip test verification, warn user
- Git operations fail → Abort with error, preserve state

**Rollback Scenarios**:
- Test failure after fix → Automatic `git checkout` rollback
- Max iterations reached → Leave file unchanged, mark as failed
- Unrecoverable error → Rollback entire group, save checkpoint

### TodoWrite Structure

**Initialization**:
```javascript
TodoWrite({
  todos: [
    {content: "Phase 1: Discovery & Initialization", status: "completed"},
    {content: "Phase 2: Planning", status: "in_progress"},
    {content: "Phase 3: Execution", status: "pending"},
    {content: "Phase 4: Completion", status: "pending"}
  ]
});
```

**During Execution**:
```javascript
TodoWrite({
  todos: [
    {content: "Phase 1: Discovery & Initialization", status: "completed"},
    {content: "Phase 2: Planning", status: "completed"},
    {content: "Phase 3: Execution", status: "in_progress"},
    {content: "  → Stage 1: Parallel execution (3 groups)", status: "completed"},
    {content: "    • Group G1: Auth validation (2 findings)", status: "completed"},
    {content: "    • Group G2: Query security (3 findings)", status: "completed"},
    {content: "    • Group G3: Config quality (1 finding)", status: "completed"},
    {content: "  → Stage 2: Serial execution (1 group)", status: "in_progress"},
    {content: "    • Group G4: Dependent fixes (2 findings)", status: "in_progress"},
    {content: "Phase 4: Completion", status: "pending"}
  ]
});
```

**Update Rules**:
- Add stage items dynamically based on fix-plan.json timeline
- Add group items per stage
- Mark completed immediately after each group finishes
- Update parent phase status when all child items complete
## Post-Completion Expansion

After completion, ask the user whether findings should be expanded into follow-up issues (test/enhance/refactor/doc); for each selected item, invoke `/issue:new "{summary} - {dimension}"`.
## Best Practices

1. **Trust AI Planning**: Planning agent's grouping and execution strategy are based on dependency analysis
2. **Conservative Approach**: Test verification is mandatory - no fixes kept without passing tests
3. **Parallel Efficiency**: Default 3 concurrent agents balances speed and resource usage
4. **Resume Support**: Fix sessions can resume from checkpoints after interruption
5. **Manual Review**: Always review failed fixes manually - they may require architectural changes
6. **Incremental Fixing**: Start with small batches (5-10 findings) before large-scale fixes

## Related Commands

### View Fix Progress
Use `ccw view` to open the workflow dashboard in browser:

```bash
ccw view
```

771
.claude/commands/workflow/review-module-cycle.md
Normal file
@@ -0,0 +1,771 @@

---
name: review-module-cycle
description: Independent multi-dimensional code review for specified modules/files. Analyzes specific code paths across 7 dimensions with hybrid parallel-iterative execution, independent of workflow sessions.
argument-hint: "<path-pattern> [--dimensions=security,architecture,...] [--max-iterations=N]"
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*), Task(*)
---

# Workflow Review-Module-Cycle Command

## Quick Start

```bash
# Review specific module (all 7 dimensions)
/workflow:review-module-cycle src/auth/**

# Review multiple modules
/workflow:review-module-cycle src/auth/**,src/payment/**

# Review with custom dimensions
/workflow:review-module-cycle src/payment/** --dimensions=security,architecture,quality

# Review specific files
/workflow:review-module-cycle src/payment/processor.ts,src/payment/validator.ts
```

**Review Scope**: Specified modules/files only (independent of git history)
**Session Requirement**: Auto-creates workflow session via `/workflow:session:start`
**Output Directory**: `.workflow/active/WFS-{session-id}/.review/` (session-based)
**Default Dimensions**: Security, Architecture, Quality, Action-Items, Performance, Maintainability, Best-Practices
**Max Iterations**: 3 (adjustable via --max-iterations)
**Default Iterations**: 1 (deep-dive runs once; use --max-iterations=0 to skip)
**CLI Tools**: Gemini → Qwen → Codex (fallback chain)

## What & Why

### Core Concept
Independent multi-dimensional code review orchestrator with **hybrid parallel-iterative execution** for comprehensive quality assessment of **specific modules or files**.

**Review Scope**:
- **Module-based**: Reviews specified file patterns (e.g., `src/auth/**`, `*.ts`)
- **Session-integrated**: Runs within workflow session context for unified tracking
- **Output location**: `.review/` subdirectory within active session

**vs Session Review**:
- **Session Review** (`review-session-cycle`): Reviews git changes within a workflow session
- **Module Review** (`review-module-cycle`): Reviews any specified code paths, regardless of git history
- **Common output**: Both use same `.review/` directory structure within session

### Value Proposition
1. **Module-Focused Review**: Target specific code areas independent of git history
2. **Session-Integrated**: Review results tracked within workflow session for unified management
3. **Comprehensive Coverage**: Same 7 specialized dimensions as session review
4. **Intelligent Prioritization**: Automatic identification of critical issues and cross-cutting concerns
5. **Unified Archive**: Review results archived with session for historical reference

### Orchestrator Boundary (CRITICAL)
- **ONLY command** for independent multi-dimensional module review
- Manages: dimension coordination, aggregation, iteration control, progress tracking
- Delegates: Code exploration and analysis to @cli-explore-agent, dimension-specific reviews via Deep Scan mode
## How It Works

### Execution Flow

```
Phase 1: Discovery & Initialization
└─ Resolve file patterns, validate paths, initialize state, create output structure

Phase 2: Parallel Reviews (for each dimension)
├─ Launch 7 review agents simultaneously
├─ Each executes CLI analysis via Gemini/Qwen on specified files
├─ Generate dimension JSON + markdown reports
└─ Update review-progress.json

Phase 3: Aggregation
├─ Load all dimension JSON files
├─ Calculate severity distribution (critical/high/medium/low)
├─ Identify cross-cutting concerns (files in 3+ dimensions)
└─ Decision:
   ├─ Critical findings OR high > 5 OR critical files → Phase 4 (Iterate)
   └─ Else → Phase 5 (Complete)

Phase 4: Iterative Deep-Dive (optional)
├─ Select critical findings (max 5 per iteration)
├─ Launch deep-dive agents for root cause analysis
├─ Generate remediation plans with impact assessment
├─ Re-assess severity based on analysis
└─ Loop until no critical findings OR max iterations

Phase 5: Completion
└─ Finalize review-progress.json
```

### Agent Roles

| Agent | Responsibility |
|-------|---------------|
| **Orchestrator** | Phase control, path resolution, state management, aggregation logic, iteration control |
| **@cli-explore-agent** (Review) | Execute dimension-specific code analysis via Deep Scan mode, generate findings JSON with dual-source strategy (Bash + Gemini), create structured analysis reports |
| **@cli-explore-agent** (Deep-dive) | Focused root cause analysis using dependency mapping, remediation planning with architectural insights, impact assessment, severity re-assessment |

## Enhanced Features

### 1. Review Dimensions Configuration

**7 Specialized Dimensions** with priority-based allocation:

| Dimension | Template | Priority | Timeout |
|-----------|----------|----------|---------|
| **Security** | 03-assess-security-risks.txt | 1 (Critical) | 60min |
| **Architecture** | 02-review-architecture.txt | 2 (High) | 60min |
| **Quality** | 02-review-code-quality.txt | 3 (Medium) | 40min |
| **Action-Items** | 02-analyze-code-patterns.txt | 2 (High) | 40min |
| **Performance** | 03-analyze-performance.txt | 3 (Medium) | 60min |
| **Maintainability** | 02-review-code-quality.txt* | 3 (Medium) | 40min |
| **Best-Practices** | 03-review-quality-standards.txt | 3 (Medium) | 40min |

*Custom focus: "Assess technical debt and maintainability"

**Category Definitions by Dimension**:

```javascript
const CATEGORIES = {
  security: ['injection', 'authentication', 'authorization', 'encryption', 'input-validation', 'access-control', 'data-exposure'],
  architecture: ['coupling', 'cohesion', 'layering', 'dependency', 'pattern-violation', 'scalability', 'separation-of-concerns'],
  quality: ['code-smell', 'duplication', 'complexity', 'naming', 'error-handling', 'testability', 'readability'],
  'action-items': ['requirement-coverage', 'acceptance-criteria', 'documentation', 'deployment-readiness', 'missing-functionality'],
  performance: ['n-plus-one', 'inefficient-query', 'memory-leak', 'blocking-operation', 'caching', 'resource-usage'],
  maintainability: ['technical-debt', 'magic-number', 'long-method', 'large-class', 'dead-code', 'commented-code'],
  'best-practices': ['convention-violation', 'anti-pattern', 'deprecated-api', 'missing-validation', 'inconsistent-style']
};
```

### 2. Path Pattern Resolution

**Syntax Rules**:
- All paths are **relative** to the project root (e.g., `src/auth/**`, not `/src/auth/**`)
- Multiple patterns: comma-separated, **no spaces** (e.g., `src/auth/**,src/payment/**`)
- Globs and specific files can be mixed (e.g., `src/auth/**,src/config.ts`)

**Supported Patterns**:

| Pattern Type | Example | Description |
|--------------|---------|-------------|
| Glob directory | `src/auth/**` | All files under src/auth/ |
| Glob with extension | `src/**/*.ts` | All .ts files under src/ |
| Specific file | `src/payment/processor.ts` | Single file |
| Multiple patterns | `src/auth/**,src/payment/**` | Comma-separated (no spaces) |

**Resolution Process** (sketched below):
1. Parse the input pattern (split by comma, trim whitespace)
2. Expand glob patterns to a file list via the `find` command
3. Validate that all files exist and are readable
4. Error if a pattern matches 0 files
5. Store the resolved file list in review-state.json

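A minimal sketch of this resolution step, assuming Bash 4+ and GNU `find`; the `resolve_patterns` helper name is ours, and persisting the list into review-state.json is left to the orchestrator. Note that `find -path` wildcards match `/`, which is why `src/auth/**` behaves like a recursive glob here:

```bash
# Hypothetical helper: expand comma-separated patterns and validate matches.
resolve_patterns() {
  local input="$1" pattern
  local resolved=()
  IFS=',' read -ra patterns <<< "$input"
  for pattern in "${patterns[@]}"; do
    pattern="$(echo "$pattern" | xargs)"   # trim surrounding whitespace
    # -path wildcards cross '/', so "./src/auth/**" matches everything below src/auth/
    mapfile -t matches < <(find . -path "./${pattern}" -type f | sed 's|^\./||')
    if (( ${#matches[@]} == 0 )); then
      echo "ERROR: no files match '${pattern}'" >&2
      return 1
    fi
    resolved+=("${matches[@]}")
  done
  printf '%s\n' "${resolved[@]}" | sort -u   # deduplicated, relative paths
}
```
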
### 3. Aggregation Logic

**Cross-Cutting Concern Detection**:
1. Files appearing in 3+ dimensions = **Critical Files**
2. Same issue pattern across dimensions = **Systemic Issue**
3. Severity clustering in specific files = **Hotspots**

**Deep-Dive Selection Criteria** (see the sketch below):
- All critical-severity findings (priority 1)
- Top 3 high-severity findings in critical files (priority 2)
- Max 5 findings per iteration (keeps each iteration focused)
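
An illustrative JavaScript sketch of both rules; the flat finding shape (`file`, `severity`, `dimension`) mirrors the dimension JSON described later, but the function names are ours:

```javascript
// Hypothetical helpers illustrating the aggregation rules above.
function findCriticalFiles(findings) {
  const dims = new Map(); // file -> set of dimensions that flagged it
  for (const f of findings) {
    if (!dims.has(f.file)) dims.set(f.file, new Set());
    dims.get(f.file).add(f.dimension);
  }
  // Critical Files = flagged by 3+ dimensions
  return [...dims].filter(([, d]) => d.size >= 3).map(([file]) => file);
}

function selectDeepDiveFindings(findings, criticalFiles, max = 5) {
  const critical = findings.filter(f => f.severity === 'critical');          // priority 1
  const highInCritical = findings
    .filter(f => f.severity === 'high' && criticalFiles.includes(f.file))
    .slice(0, 3);                                                            // priority 2
  return [...critical, ...highInCritical].slice(0, max);                     // cap per iteration
}
```
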
### 4. Severity Assessment

**Severity Levels**:
- **Critical**: Security vulnerabilities, data corruption risks, system-wide failures, authentication/authorization bypass
- **High**: Feature degradation, performance bottlenecks, architecture violations, significant technical debt
- **Medium**: Code smells, minor performance issues, style inconsistencies, maintainability concerns
- **Low**: Documentation gaps, minor refactoring opportunities, cosmetic issues

**Iteration Trigger**:
- Critical findings > 0 OR
- High findings > 5 OR
- Critical files count > 0

## Core Responsibilities

### Orchestrator

**Phase 1: Discovery & Initialization**

**Step 1: Session Creation**
```javascript
// Create a workflow session for this review (type: review)
SlashCommand(command="/workflow:session:start --type review \"Code review for [target_pattern]\"")

// Parse the session ID from the command output
const sessionId = output.match(/SESSION_ID: (WFS-[^\s]+)/)[1];
```

**Step 2: Path Resolution & Validation**
```bash
# Expand glob pattern to a file list (relative paths from project root)
find . -path "./src/auth/**" -type f | sed 's|^\./||'

# Validate that files exist and are readable
for file in "${resolvedFiles[@]}"; do
  test -r "$file" || error "File not readable: $file"
done
```
- Parse and expand file patterns (glob support): `src/auth/**` → actual file list
- Validation: ensure all specified files exist and are readable
- Store as **relative paths** from the project root (e.g., `src/auth/service.ts`)
- Agents construct absolute paths dynamically during execution

**Step 3: Output Directory Setup**
- Output directory: `.workflow/active/${sessionId}/.review/`
- Create directory structure:
```bash
mkdir -p "${sessionDir}"/.review/{dimensions,iterations,reports}
```

**Step 4: Initialize Review State**
- State initialization: create `review-state.json` with metadata, dimensions, max_iterations, and resolved_files (merged metadata + state); see the sketch below
- Progress tracking: create `review-progress.json` for real-time status updates
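
A sketch of the initial `review-state.json` write, using the field names from the Review State JSON section below; `Write(...)` stands in for whatever file-write mechanism the orchestrator actually uses:

```javascript
// Illustrative initial state; field names follow the Review State JSON below.
const initialState = {
  review_id: `review-${timestamp}`,       // e.g., review-20250125-143022
  review_type: "module",
  session_id: sessionId,
  metadata: {
    created_at: new Date().toISOString(),
    target_pattern: targetPattern,
    resolved_files: resolvedFiles,
    dimensions: selectedDimensions,
    max_iterations: maxIterations         // default 3
  },
  phase: "parallel",
  current_iteration: 0,
  dimensions_reviewed: [],
  next_action: "execute_parallel_reviews"
};
// Write() is a stand-in for the orchestrator's file-write mechanism
Write(`${sessionDir}/.review/review-state.json`, JSON.stringify(initialState, null, 2));
```
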

**Step 5: TodoWrite Initialization**
- Set up progress tracking with a hierarchical structure
- Mark Phase 1 completed, Phase 2 in_progress

**Phase 2: Parallel Review Coordination**
- Launch 7 @cli-explore-agent instances simultaneously (Deep Scan mode)
- Pass dimension-specific context (template, timeout, custom focus, **target files**)
- Monitor completion via review-progress.json updates
- TodoWrite updates: mark dimensions as completed
- CLI tool fallback: Gemini → Qwen → Codex (on error/timeout)

**Phase 3: Aggregation**
- Load all dimension JSON files from dimensions/
- Calculate severity distribution: count by critical/high/medium/low
- Identify cross-cutting concerns: files in 3+ dimensions
- Select deep-dive findings: critical + high in critical files (max 5)
- Decision logic: iterate if critical > 0 OR high > 5 OR critical files exist (see the sketch below)
- Update review-state.json with aggregation results
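
A compact sketch of the Phase 3 decision, assuming dimension JSONs follow the flat-summary shape required later (`critical`, `high`, ... counts at the top of `summary`); `loadJson` is a stand-in for Read + JSON.parse, and `findCriticalFiles` comes from the earlier Aggregation Logic sketch:

```javascript
// Illustrative aggregation; loadJson is a stand-in for Read + JSON.parse.
function aggregate(dimensionFiles) {
  const totals = { critical: 0, high: 0, medium: 0, low: 0 };
  const findings = [];
  for (const path of dimensionFiles) {
    const [result] = loadJson(path);                    // root structure is an array
    for (const level of Object.keys(totals)) totals[level] += result.summary[level];
    findings.push(...result.findings);
  }
  const criticalFiles = findCriticalFiles(findings);    // files flagged by 3+ dimensions
  const iterate = totals.critical > 0 || totals.high > 5 || criticalFiles.length > 0;
  return {
    totals,
    findings,
    criticalFiles,
    next_action: iterate ? "execute_deep_dive" : "complete"
  };
}
```
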

**Phase 4: Iteration Control**
- Check iteration count < max_iterations (default 3)
- Launch deep-dive agents for the selected findings
- Collect remediation plans and re-assessed severities
- Update the severity distribution based on re-assessments
- Record the iteration in review-state.json
- Loop back to aggregation while critical/high findings remain

**Phase 5: Completion**
- Finalize review-progress.json with completion statistics
- Update review-state.json with completion_time and phase=complete
- TodoWrite completion: mark all tasks done

### Output File Structure

```
.workflow/active/WFS-{session-id}/.review/
├── review-state.json          # Orchestrator state machine (includes metadata)
├── review-progress.json       # Real-time progress for dashboard
├── dimensions/                # Per-dimension results
│   ├── security.json
│   ├── architecture.json
│   ├── quality.json
│   ├── action-items.json
│   ├── performance.json
│   ├── maintainability.json
│   └── best-practices.json
├── iterations/                # Deep-dive results
│   ├── iteration-1-finding-{uuid}.json
│   └── iteration-2-finding-{uuid}.json
└── reports/                   # Human-readable reports
    ├── security-analysis.md
    ├── security-cli-output.txt
    ├── deep-dive-1-{uuid}.md
    └── ...
```

**Session Context**:
```
.workflow/active/WFS-{session-id}/
├── workflow-session.json
├── IMPL_PLAN.md
├── TODO_LIST.md
├── .task/
├── .summaries/
└── .review/                   # Review results (this command)
    └── (structure above)
```

### Review State JSON

**Purpose**: Unified state machine and metadata (merged from metadata + state)

```json
{
  "review_id": "review-20250125-143022",
  "review_type": "module",
  "session_id": "WFS-auth-system",
  "metadata": {
    "created_at": "2025-01-25T14:30:22Z",
    "target_pattern": "src/auth/**",
    "resolved_files": [
      "src/auth/service.ts",
      "src/auth/validator.ts",
      "src/auth/middleware.ts"
    ],
    "dimensions": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
    "max_iterations": 3
  },
  "phase": "parallel|aggregate|iterate|complete",
  "current_iteration": 1,
  "dimensions_reviewed": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
  "selected_strategy": "comprehensive",
  "next_action": "execute_parallel_reviews|aggregate_findings|execute_deep_dive|generate_final_report|complete",
  "severity_distribution": {
    "critical": 2,
    "high": 5,
    "medium": 12,
    "low": 8
  },
  "critical_files": [...],
  "iterations": [...],
  "completion_criteria": {...}
}
```
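
Since this file drives the loop, it is handy to inspect from the shell; two illustrative `jq` queries (the session path is an example):

```bash
STATE=.workflow/active/WFS-auth-system/.review/review-state.json

# Current phase and the next action the orchestrator will take
jq -r '"\(.phase) → \(.next_action)"' "$STATE"

# Does the iteration trigger fire? (critical > 0 OR high > 5 OR critical files exist)
jq '.severity_distribution.critical > 0
    or .severity_distribution.high > 5
    or (.critical_files | length) > 0' "$STATE"
```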

### Review Progress JSON

**Purpose**: Real-time dashboard updates via polling

```json
{
  "review_id": "review-20250125-143022",
  "last_update": "2025-01-25T14:35:10Z",
  "phase": "parallel|aggregate|iterate|complete",
  "current_iteration": 1,
  "progress": {
    "parallel_review": {
      "total_dimensions": 7,
      "completed": 5,
      "in_progress": 2,
      "percent_complete": 71
    },
    "deep_dive": {
      "total_findings": 6,
      "analyzed": 2,
      "in_progress": 1,
      "percent_complete": 33
    }
  },
  "agent_status": [
    {
      "agent_type": "review-agent",
      "dimension": "security",
      "status": "completed",
      "started_at": "2025-01-25T14:30:00Z",
      "completed_at": "2025-01-25T15:15:00Z",
      "duration_ms": 2700000
    },
    {
      "agent_type": "deep-dive-agent",
      "finding_id": "sec-001-uuid",
      "status": "in_progress",
      "started_at": "2025-01-25T14:32:00Z"
    }
  ],
  "estimated_completion": "2025-01-25T16:00:00Z"
}
```

### Agent Output Schemas

**Agent-produced JSON files follow standardized schemas**:

1. **Dimension Results** (cli-explore-agent output from parallel reviews)
   - Schema: `~/.claude/workflows/cli-templates/schemas/review-dimension-results-schema.json`
   - Output: `{output-dir}/dimensions/{dimension}.json`
   - Contains: findings array, summary statistics, cross_references

2. **Deep-Dive Results** (cli-explore-agent output from iterations)
   - Schema: `~/.claude/workflows/cli-templates/schemas/review-deep-dive-results-schema.json`
   - Output: `{output-dir}/iterations/iteration-{N}-finding-{uuid}.json`
   - Contains: root_cause, remediation_plan, impact_assessment, reassessed_severity
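
For orientation, a minimal dimension result consistent with the structure rules spelled out in the invocation template below (array root, flat summary, lowercase severity); the concrete values are invented:

```json
[
  {
    "dimension": "security",
    "review_id": "review-20250125-143022",
    "analysis_timestamp": "2025-01-25T15:15:00Z",
    "cli_tool_used": "gemini",
    "model": "example-model",
    "analysis_duration_ms": 2700000,
    "summary": { "total_findings": 1, "critical": 1, "high": 0, "medium": 0, "low": 0, "files_analyzed": 3, "lines_reviewed": 412 },
    "findings": [
      {
        "id": "sec-001-a1b2c3d4",
        "severity": "critical",
        "category": "injection",
        "file": "src/auth/service.ts",
        "line": 42,
        "snippet": "db.query(`SELECT * FROM users WHERE name = '${name}'`)",
        "impact": "Unsanitized input reaches a SQL query",
        "metadata": {},
        "iteration": 0,
        "status": "pending_remediation",
        "cross_references": []
      }
    ],
    "cross_references": []
  }
]
```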

### Agent Invocation Template

**Review Agent** (parallel execution, 7 instances):

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description=`Execute ${dimension} review analysis via Deep Scan`,
  prompt=`
## Task Objective
Conduct comprehensive ${dimension} code exploration and analysis using Deep Scan mode (Bash + Gemini dual-source strategy) for the specified module files

## Analysis Mode Selection
Use **Deep Scan mode** for this review:
- Phase 1: Bash structural scan for standard patterns (classes, functions, imports)
- Phase 2: Gemini semantic analysis for design intent, non-standard patterns, ${dimension}-specific concerns
- Phase 3: Synthesis with attribution (bash-discovered vs gemini-discovered findings)

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Read review state: ${reviewStateJsonPath}
2. Get target files: read resolved_files from review-state.json
3. Validate file access: bash(ls -la ${targetFiles.join(' ')})
4. Execute: cat ~/.claude/workflows/cli-templates/schemas/review-dimension-results-schema.json (get output schema reference)
5. Read: .workflow/project-tech.json (technology stack and architecture context)
6. Read: .workflow/project-guidelines.json (user-defined constraints and conventions to validate against)

## Review Context
- Review Type: module (independent)
- Review Dimension: ${dimension}
- Review ID: ${reviewId}
- Target Pattern: ${targetPattern}
- Resolved Files: ${resolvedFiles.length} files
- Output Directory: ${outputDir}

## CLI Configuration
- Tool Priority: gemini → qwen → codex (fallback chain)
- Custom Focus: ${customFocus || 'Standard dimension analysis'}
- Mode: analysis (READ-ONLY)
- Context Pattern: ${targetFiles.map(f => `@${f}`).join(' ')}

## Expected Deliverables

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 4; follow the schema exactly

1. Dimension Results JSON: ${outputDir}/dimensions/${dimension}.json

   **⚠️ CRITICAL JSON STRUCTURE REQUIREMENTS**:

   Root structure MUST be an array: \`[{ ... }]\` NOT \`{ ... }\`

   Required top-level fields:
   - dimension, review_id, analysis_timestamp (NOT timestamp/analyzed_at)
   - cli_tool_used (gemini|qwen|codex), model, analysis_duration_ms
   - summary (FLAT structure), findings, cross_references

   Summary MUST be FLAT (NOT nested by_severity):
   \`{ "total_findings": N, "critical": N, "high": N, "medium": N, "low": N, "files_analyzed": N, "lines_reviewed": N }\`

   Finding required fields:
   - id: format \`{dim}-{seq}-{uuid8}\` e.g., \`sec-001-a1b2c3d4\` (lowercase)
   - severity: lowercase only (critical|high|medium|low)
   - snippet (NOT code_snippet), impact (NOT exploit_scenario)
   - metadata, iteration (0), status (pending_remediation), cross_references

2. Analysis Report: ${outputDir}/reports/${dimension}-analysis.md
   - Human-readable summary with recommendations
   - Grouped by severity: critical → high → medium → low
   - Include file:line references for all findings

3. CLI Output Log: ${outputDir}/reports/${dimension}-cli-output.txt
   - Raw CLI tool output for debugging
   - Include the full analysis text

## Dimension-Specific Guidance
${getDimensionGuidance(dimension)}

## Success Criteria
- [ ] Schema obtained via cat review-dimension-results-schema.json
- [ ] All target files analyzed for ${dimension} concerns
- [ ] All findings include file:line references with code snippets
- [ ] Severity assessment follows established criteria (see reference)
- [ ] Recommendations are actionable with code examples
- [ ] JSON output follows the schema exactly
- [ ] Report is comprehensive and well-organized
`
)
```

**Deep-Dive Agent** (iteration execution):

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description=`Deep-dive analysis for critical finding: ${findingTitle} via Dependency Map + Deep Scan`,
  prompt=`
## Task Objective
Perform focused root cause analysis using Dependency Map mode (for impact analysis) + Deep Scan mode (for semantic understanding) to generate a comprehensive remediation plan for a critical ${dimension} issue

## Analysis Mode Selection
Use **Dependency Map mode** first to understand dependencies:
- Build a dependency graph around ${file} to identify affected components
- Detect circular dependencies or tight coupling related to this finding
- Calculate change risk scores for remediation impact

Then apply **Deep Scan mode** for semantic analysis:
- Understand design intent and architectural context
- Identify non-standard patterns or implicit dependencies
- Extract remediation insights from code structure

## Finding Context
- Finding ID: ${findingId}
- Original Dimension: ${dimension}
- Title: ${findingTitle}
- File: ${file}:${line}
- Severity: ${severity}
- Category: ${category}
- Original Description: ${description}
- Iteration: ${iteration}

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Read original finding: ${dimensionJsonPath}
2. Read affected file: ${file}
3. Identify related code: bash(grep -r "import.*${basename(file)}" ${projectDir}/src --include="*.ts")
4. Read test files: bash(find ${projectDir}/tests -name "*${basename(file, '.ts')}*" -type f)
5. Execute: cat ~/.claude/workflows/cli-templates/schemas/review-deep-dive-results-schema.json (get output schema reference)
6. Read: .workflow/project-tech.json (technology stack and architecture context)
7. Read: .workflow/project-guidelines.json (user-defined constraints for remediation compliance)

## CLI Configuration
- Tool Priority: gemini → qwen → codex
- Template: ~/.claude/workflows/cli-templates/prompts/analysis/01-diagnose-bug-root-cause.txt
- Mode: analysis (READ-ONLY)

## Expected Deliverables

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 5; follow the schema exactly

1. Deep-Dive Results JSON: ${outputDir}/iterations/iteration-${iteration}-finding-${findingId}.json

   **⚠️ CRITICAL JSON STRUCTURE REQUIREMENTS**:

   Root structure MUST be an array: \`[{ ... }]\` NOT \`{ ... }\`

   Required top-level fields:
   - finding_id, dimension, iteration, analysis_timestamp
   - cli_tool_used, model, analysis_duration_ms
   - original_finding, root_cause, remediation_plan
   - impact_assessment, reassessed_severity, confidence_score, cross_references

   All nested objects must follow the schema exactly - read the schema for field names

2. Analysis Report: ${outputDir}/reports/deep-dive-${iteration}-${findingId}.md
   - Detailed root cause analysis
   - Step-by-step remediation plan
   - Impact assessment and rollback strategy

## Success Criteria
- [ ] Schema obtained via cat review-deep-dive-results-schema.json
- [ ] Root cause clearly identified with supporting evidence
- [ ] Remediation plan is step-by-step actionable with exact file:line references
- [ ] Each step includes specific commands and validation tests
- [ ] Impact fully assessed (files, tests, breaking changes, dependencies)
- [ ] Severity re-evaluation justified with evidence
- [ ] Confidence score accurately reflects certainty of analysis
- [ ] JSON output follows the schema exactly
- [ ] References include project-specific and external documentation
`
)
```

### Dimension Guidance Reference

```javascript
function getDimensionGuidance(dimension) {
  const guidance = {
    security: `
Focus Areas:
- Input validation and sanitization
- Authentication and authorization mechanisms
- Data encryption (at-rest and in-transit)
- SQL/NoSQL injection vulnerabilities
- XSS, CSRF, and other web vulnerabilities
- Sensitive data exposure
- Access control and privilege escalation

Severity Criteria:
- Critical: Authentication bypass, SQL injection, RCE, sensitive data exposure
- High: Missing authorization checks, weak encryption, exposed secrets
- Medium: Missing input validation, insecure defaults, weak password policies
- Low: Security headers missing, verbose error messages, outdated dependencies
`,
    architecture: `
Focus Areas:
- Layering and separation of concerns
- Coupling and cohesion
- Design pattern adherence
- Dependency management
- Scalability and extensibility
- Module boundaries
- API design consistency

Severity Criteria:
- Critical: Circular dependencies, god objects, tight coupling across layers
- High: Violated architectural principles, scalability bottlenecks
- Medium: Missing abstractions, inconsistent patterns, suboptimal design
- Low: Minor coupling issues, documentation gaps, naming inconsistencies
`,
    quality: `
Focus Areas:
- Code duplication
- Complexity (cyclomatic, cognitive)
- Naming conventions
- Error handling patterns
- Code readability
- Comment quality
- Dead code

Severity Criteria:
- Critical: Severe complexity (CC > 20), massive duplication (>50 lines)
- High: High complexity (CC > 10), significant duplication, poor error handling
- Medium: Moderate complexity (CC > 5), naming issues, code smells
- Low: Minor duplication, documentation gaps, cosmetic issues
`,
    'action-items': `
Focus Areas:
- Requirements coverage verification
- Acceptance criteria met
- Documentation completeness
- Deployment readiness
- Missing functionality
- Test coverage gaps
- Configuration management

Severity Criteria:
- Critical: Core requirements not met, deployment blockers
- High: Significant functionality missing, acceptance criteria not met
- Medium: Minor requirements gaps, documentation incomplete
- Low: Nice-to-have features missing, minor documentation gaps
`,
    performance: `
Focus Areas:
- N+1 query problems
- Inefficient algorithms (O(n²) where O(n log n) possible)
- Memory leaks
- Blocking operations on main thread
- Missing caching opportunities
- Resource usage (CPU, memory, network)
- Database query optimization

Severity Criteria:
- Critical: Memory leaks, O(n²) in hot path, blocking main thread
- High: N+1 queries, missing indexes, inefficient algorithms
- Medium: Suboptimal caching, unnecessary computations, lazy loading issues
- Low: Minor optimization opportunities, redundant operations
`,
    maintainability: `
Focus Areas:
- Technical debt indicators
- Magic numbers and hardcoded values
- Long methods (>50 lines)
- Large classes (>500 lines)
- Dead code and commented code
- Code documentation
- Test coverage

Severity Criteria:
- Critical: Massive methods (>200 lines), severe technical debt blocking changes
- High: Large methods (>100 lines), significant dead code, undocumented complex logic
- Medium: Magic numbers, moderate technical debt, missing tests
- Low: Minor refactoring opportunities, cosmetic improvements
`,
    'best-practices': `
Focus Areas:
- Framework conventions adherence
- Language idioms
- Anti-patterns
- Deprecated API usage
- Coding standards compliance
- Error handling patterns
- Logging and monitoring

Severity Criteria:
- Critical: Severe anti-patterns, deprecated APIs with security risks
- High: Major convention violations, poor error handling, missing logging
- Medium: Minor anti-patterns, style inconsistencies, suboptimal patterns
- Low: Cosmetic style issues, minor convention deviations
`
  };

  return guidance[dimension] || 'Standard code review analysis';
}
```

### Completion Conditions

**Full Success**:
- All dimensions reviewed
- Critical findings = 0
- High findings ≤ 5
- Action: generate final report, mark phase=complete

**Partial Success**:
- All dimensions reviewed
- Max iterations reached
- Critical/high findings remain
- Action: generate report with warnings, recommend follow-up

### Error Handling

**Phase-Level Error Matrix**:

| Phase | Error | Blocking? | Action |
|-------|-------|-----------|--------|
| Phase 1 | Invalid path pattern | Yes | Error and exit |
| Phase 1 | No files matched | Yes | Error and exit |
| Phase 1 | Files not readable | Yes | Error and exit |
| Phase 2 | Single dimension fails | No | Log warning, continue other dimensions |
| Phase 2 | All dimensions fail | Yes | Error and exit |
| Phase 3 | Missing dimension JSON | No | Skip in aggregation, log warning |
| Phase 4 | Deep-dive agent fails | No | Skip finding, continue others |
| Phase 4 | Max iterations reached | No | Generate partial report |

**CLI Fallback Chain**: Gemini → Qwen → Codex → degraded mode

**Fallback Triggers**:
1. HTTP 429, 5xx errors, connection timeout
2. Invalid JSON output (parse error, missing required fields)
3. Confidence score below 0.4
4. Analysis too brief (< 100 words in the report)

**Fallback Behavior** (sketched below):
- On trigger: retry with the next tool in the chain
- After Codex fails: enter degraded mode (skip analysis, log error)
- Degraded mode: continue the workflow with available results
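
A schematic of the fallback chain in the document's pseudo-JavaScript; `runCli` and `isAcceptable` are hypothetical helpers wrapping the CLI invocation and the trigger checks above:

```javascript
// Illustrative fallback loop; runCli/isAcceptable are stand-ins.
const CHAIN = ["gemini", "qwen", "codex"];

async function analyzeWithFallback(prompt) {
  for (const tool of CHAIN) {
    try {
      const result = await runCli(tool, prompt);
      // Triggers 2-4: invalid JSON, confidence < 0.4, analysis too brief
      if (isAcceptable(result)) return result;
    } catch (err) {
      // Trigger 1: HTTP 429/5xx or timeout -> fall through to the next tool
    }
  }
  // Degraded mode: skip this analysis and continue with available results
  return { degraded: true };
}
```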

### TodoWrite Structure

```javascript
TodoWrite({
  todos: [
    { content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Initializing" },
    { content: "Phase 2: Parallel Reviews (7 dimensions)", status: "in_progress", activeForm: "Reviewing" },
    { content: " → Security review", status: "in_progress", activeForm: "Analyzing security" },
    // ... other dimensions as sub-items
    { content: "Phase 3: Aggregation", status: "pending", activeForm: "Aggregating" },
    { content: "Phase 4: Deep-dive", status: "pending", activeForm: "Deep-diving" },
    { content: "Phase 5: Completion", status: "pending", activeForm: "Completing" }
  ]
});
```

## Best Practices

1. **Start Specific**: Begin with focused module patterns for faster results
2. **Expand Gradually**: Add more modules based on initial findings
3. **Use Globs Wisely**: `src/auth/**` is more efficient than a broad `src/**`, which drags in many irrelevant files
4. **Trust the Aggregation Logic**: Auto-selection is based on proven heuristics
5. **Monitor Logs**: Check the reports/ directory for CLI analysis insights

## Related Commands

### View Review Progress
Use `ccw view` to open the review dashboard in the browser:

```bash
ccw view
```

### Automated Fix Workflow
After completing a module review, use the generated findings JSON for automated fixing:

```bash
# Step 1: Complete the review (this command)
/workflow:review-module-cycle src/auth/**

# Step 2: Run automated fixes using the dimension findings
/workflow:review-cycle-fix .workflow/active/WFS-{session-id}/.review/
```

See `/workflow:review-cycle-fix` for automated fixing with smart grouping, parallel execution, and test verification.

782 .claude/commands/workflow/review-session-cycle.md Normal file
@@ -0,0 +1,782 @@
---
name: review-session-cycle
description: Session-based comprehensive multi-dimensional code review. Analyzes git changes from a workflow session across 7 dimensions with hybrid parallel-iterative execution, aggregates findings, and performs focused deep-dives on critical issues until quality gates are met.
argument-hint: "[session-id] [--dimensions=security,architecture,...] [--max-iterations=N]"
allowed-tools: SlashCommand(*), TodoWrite(*), Read(*), Bash(*), Task(*)
---

# Workflow Review-Session-Cycle Command

## Quick Start

```bash
# Execute a comprehensive session review (all 7 dimensions)
/workflow:review-session-cycle

# Review a specific session with custom dimensions
/workflow:review-session-cycle WFS-payment-integration --dimensions=security,architecture,quality

# Specify session and iteration limit
/workflow:review-session-cycle WFS-payment-integration --max-iterations=5
```

**Review Scope**: Git changes from session creation to present (via `git log --since`)
**Session Requirement**: Requires an active or completed workflow session
**Output Directory**: `.workflow/active/WFS-{session-id}/.review/` (session-based)
**Default Dimensions**: Security, Architecture, Quality, Action-Items, Performance, Maintainability, Best-Practices
**Max Iterations**: 3 (upper bound, adjustable via `--max-iterations`)
**Default Iterations**: 1 (the deep-dive loop runs once; pass `--max-iterations=0` to skip it)
**CLI Tools**: Gemini → Qwen → Codex (fallback chain)

## What & Why

### Core Concept
Session-based multi-dimensional code review orchestrator with **hybrid parallel-iterative execution** for comprehensive quality assessment of **git changes within a workflow session**.

**Review Scope**:
- **Session-based**: Reviews only files changed during the workflow session (via `git log --since="${sessionCreatedAt}"`)
- **For independent module review**: Use the `/workflow:review-module-cycle` command instead

**vs Standard Review**:
- **Standard**: Sequential manual reviews → inconsistent coverage → missed cross-cutting concerns
- **Review-Session-Cycle**: **Parallel automated analysis → aggregate findings → deep-dive critical issues** → comprehensive coverage

### Value Proposition
1. **Comprehensive Coverage**: 7 specialized dimensions analyze all quality aspects simultaneously
2. **Intelligent Prioritization**: Automatic identification of critical issues and cross-cutting concerns
3. **Actionable Insights**: Deep-dive iterations provide step-by-step remediation plans

### Orchestrator Boundary (CRITICAL)
- **ONLY command** for comprehensive multi-dimensional review
- Manages: dimension coordination, aggregation, iteration control, progress tracking
- Delegates: code exploration and analysis to @cli-explore-agent; dimension-specific reviews via Deep Scan mode

## How It Works

### Execution Flow (Simplified)

```
Phase 1: Discovery & Initialization
└─ Validate session, initialize state, create output structure

Phase 2: Parallel Reviews (for each dimension)
├─ Launch 7 review agents simultaneously
├─ Each executes CLI analysis via Gemini/Qwen
├─ Generate dimension JSON + markdown reports
└─ Update review-progress.json

Phase 3: Aggregation
├─ Load all dimension JSON files
├─ Calculate severity distribution (critical/high/medium/low)
├─ Identify cross-cutting concerns (files in 3+ dimensions)
└─ Decision:
   ├─ Critical findings OR high > 5 OR critical files → Phase 4 (Iterate)
   └─ Else → Phase 5 (Complete)

Phase 4: Iterative Deep-Dive (optional)
├─ Select critical findings (max 5 per iteration)
├─ Launch deep-dive agents for root cause analysis
├─ Generate remediation plans with impact assessment
├─ Re-assess severity based on analysis
└─ Loop until no critical findings OR max iterations

Phase 5: Completion
└─ Finalize review-progress.json
```

### Agent Roles

| Agent | Responsibility |
|-------|---------------|
| **Orchestrator** | Phase control, session discovery, state management, aggregation logic, iteration control |
| **@cli-explore-agent** (Review) | Execute dimension-specific code analysis via Deep Scan mode, generate findings JSON with dual-source strategy (Bash + Gemini), create structured analysis reports |
| **@cli-explore-agent** (Deep-dive) | Focused root cause analysis using dependency mapping, remediation planning with architectural insights, impact assessment, severity re-assessment |

## Enhanced Features

### 1. Review Dimensions Configuration

**7 Specialized Dimensions** with priority-based allocation:

| Dimension | Template | Priority | Timeout |
|-----------|----------|----------|---------|
| **Security** | 03-assess-security-risks.txt | 1 (Critical) | 60min |
| **Architecture** | 02-review-architecture.txt | 2 (High) | 60min |
| **Quality** | 02-review-code-quality.txt | 3 (Medium) | 40min |
| **Action-Items** | 02-analyze-code-patterns.txt | 2 (High) | 40min |
| **Performance** | 03-analyze-performance.txt | 3 (Medium) | 60min |
| **Maintainability** | 02-review-code-quality.txt* | 3 (Medium) | 40min |
| **Best-Practices** | 03-review-quality-standards.txt | 3 (Medium) | 40min |

*Custom focus: "Assess technical debt and maintainability"

**Category Definitions by Dimension**:

```javascript
const CATEGORIES = {
  security: ['injection', 'authentication', 'authorization', 'encryption', 'input-validation', 'access-control', 'data-exposure'],
  architecture: ['coupling', 'cohesion', 'layering', 'dependency', 'pattern-violation', 'scalability', 'separation-of-concerns'],
  quality: ['code-smell', 'duplication', 'complexity', 'naming', 'error-handling', 'testability', 'readability'],
  'action-items': ['requirement-coverage', 'acceptance-criteria', 'documentation', 'deployment-readiness', 'missing-functionality'],
  performance: ['n-plus-one', 'inefficient-query', 'memory-leak', 'blocking-operation', 'caching', 'resource-usage'],
  maintainability: ['technical-debt', 'magic-number', 'long-method', 'large-class', 'dead-code', 'commented-code'],
  'best-practices': ['convention-violation', 'anti-pattern', 'deprecated-api', 'missing-validation', 'inconsistent-style']
};
```

### 2. Aggregation Logic

**Cross-Cutting Concern Detection**:
1. Files appearing in 3+ dimensions = **Critical Files**
2. Same issue pattern across dimensions = **Systemic Issue**
3. Severity clustering in specific files = **Hotspots**

**Deep-Dive Selection Criteria**:
- All critical-severity findings (priority 1)
- Top 3 high-severity findings in critical files (priority 2)
- Max 5 findings per iteration (keeps each iteration focused)

### 3. Severity Assessment

**Severity Levels**:
- **Critical**: Security vulnerabilities, data corruption risks, system-wide failures, authentication/authorization bypass
- **High**: Feature degradation, performance bottlenecks, architecture violations, significant technical debt
- **Medium**: Code smells, minor performance issues, style inconsistencies, maintainability concerns
- **Low**: Documentation gaps, minor refactoring opportunities, cosmetic issues

**Iteration Trigger**:
- Critical findings > 0 OR
- High findings > 5 OR
- Critical files count > 0

## Core Responsibilities

### Orchestrator

**Phase 1: Discovery & Initialization**

**Step 1: Session Discovery**
```javascript
// If a session ID is not provided, auto-detect it
if (!providedSessionId) {
  // Check for active sessions
  const activeSessions = Glob('.workflow/active/WFS-*');
  if (activeSessions.length === 1) {
    sessionId = activeSessions[0].match(/WFS-[^/]+/)[0];
  } else if (activeSessions.length > 1) {
    // List sessions and prompt the user
    error("Multiple active sessions found. Please specify session ID.");
  } else {
    error("No active session found. Create session first with /workflow:session:start");
  }
} else {
  sessionId = providedSessionId;
}

// Validate that the session exists
Bash(`test -d .workflow/active/${sessionId} && echo "EXISTS"`);
```

**Step 2: Session Validation**
- Ensure the session has implementation artifacts (check the `.summaries/` or `.task/` directory)
- Extract the session creation timestamp from `workflow-session.json` (see the sketch below)
- Use the timestamp for git log filtering: `git log --since="${sessionCreatedAt}"`
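
A small sketch of this extraction, assuming `workflow-session.json` exposes the timestamp under a `created_at` key (the exact field name may differ):

```bash
SESSION_DIR=".workflow/active/${sessionId}"

# created_at is an assumed field name in workflow-session.json
sessionCreatedAt=$(jq -r '.created_at' "${SESSION_DIR}/workflow-session.json")

# Files changed since the session began (the same command as Step 3)
git log --since="${sessionCreatedAt}" --name-only --pretty=format: | sort -u
```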

**Step 3: Changed Files Detection**
```bash
# Get files changed since session creation
git log --since="${sessionCreatedAt}" --name-only --pretty=format: | sort -u
```

**Step 4: Output Directory Setup**
- Output directory: `.workflow/active/${sessionId}/.review/`
- Create directory structure:
```bash
mkdir -p "${sessionDir}"/.review/{dimensions,iterations,reports}
```

**Step 5: Initialize Review State**
- State initialization: create `review-state.json` with metadata, dimensions, max_iterations (merged metadata + state)
- Progress tracking: create `review-progress.json` for real-time status updates

**Step 6: TodoWrite Initialization**
- Set up progress tracking with a hierarchical structure
- Mark Phase 1 completed, Phase 2 in_progress

**Phase 2: Parallel Review Coordination**
- Launch 7 @cli-explore-agent instances simultaneously (Deep Scan mode)
- Pass dimension-specific context (template, timeout, custom focus)
- Monitor completion via review-progress.json updates
- TodoWrite updates: mark dimensions as completed
- CLI tool fallback: Gemini → Qwen → Codex (on error/timeout)

**Phase 3: Aggregation**
- Load all dimension JSON files from dimensions/
- Calculate severity distribution: count by critical/high/medium/low
- Identify cross-cutting concerns: files in 3+ dimensions
- Select deep-dive findings: critical + high in critical files (max 5)
- Decision logic: iterate if critical > 0 OR high > 5 OR critical files exist
- Update review-state.json with aggregation results

**Phase 4: Iteration Control**
- Check iteration count < max_iterations (default 3)
- Launch deep-dive agents for the selected findings
- Collect remediation plans and re-assessed severities
- Update the severity distribution based on re-assessments
- Record the iteration in review-state.json
- Loop back to aggregation while critical/high findings remain

**Phase 5: Completion**
- Finalize review-progress.json with completion statistics
- Update review-state.json with completion_time and phase=complete
- TodoWrite completion: mark all tasks done

### Session File Structure

```
.workflow/active/WFS-{session-id}/.review/
├── review-state.json          # Orchestrator state machine (includes metadata)
├── review-progress.json       # Real-time progress for dashboard
├── dimensions/                # Per-dimension results
│   ├── security.json
│   ├── architecture.json
│   ├── quality.json
│   ├── action-items.json
│   ├── performance.json
│   ├── maintainability.json
│   └── best-practices.json
├── iterations/                # Deep-dive results
│   ├── iteration-1-finding-{uuid}.json
│   └── iteration-2-finding-{uuid}.json
└── reports/                   # Human-readable reports
    ├── security-analysis.md
    ├── security-cli-output.txt
    ├── deep-dive-1-{uuid}.md
    └── ...
```

**Session Context**:
```
.workflow/active/WFS-{session-id}/
├── workflow-session.json
├── IMPL_PLAN.md
├── TODO_LIST.md
├── .task/
├── .summaries/
└── .review/                   # Review results (this command)
    └── (structure above)
```

### Review State JSON

**Purpose**: Unified state machine and metadata (merged from metadata + state)

```json
{
  "session_id": "WFS-payment-integration",
  "review_id": "review-20250125-143022",
  "review_type": "session",
  "metadata": {
    "created_at": "2025-01-25T14:30:22Z",
    "git_changes": {
      "commit_range": "abc123..def456",
      "files_changed": 15,
      "insertions": 342,
      "deletions": 128
    },
    "dimensions": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
    "max_iterations": 3
  },
  "phase": "parallel|aggregate|iterate|complete",
  "current_iteration": 1,
  "dimensions_reviewed": ["security", "architecture", "quality", "action-items", "performance", "maintainability", "best-practices"],
  "selected_strategy": "comprehensive",
  "next_action": "execute_parallel_reviews|aggregate_findings|execute_deep_dive|generate_final_report|complete",
  "severity_distribution": {
    "critical": 2,
    "high": 5,
    "medium": 12,
    "low": 8
  },
  "critical_files": [
    {
      "file": "src/payment/processor.ts",
      "finding_count": 5,
      "dimensions": ["security", "architecture", "quality"]
    }
  ],
  "iterations": [
    {
      "iteration": 1,
      "findings_analyzed": ["uuid-1", "uuid-2"],
      "findings_resolved": 1,
      "findings_escalated": 1,
      "severity_change": {
        "before": {"critical": 2, "high": 5, "medium": 12, "low": 8},
        "after": {"critical": 1, "high": 6, "medium": 12, "low": 8}
      },
      "timestamp": "2025-01-25T14:30:00Z"
    }
  ],
  "completion_criteria": {
    "target": "no_critical_findings_and_high_under_5",
    "current_status": "in_progress",
    "estimated_completion": "2 iterations remaining"
  }
}
```

**Field Descriptions**:
- `phase`: Current execution phase (state machine pointer)
- `current_iteration`: Iteration counter (used for the max check)
- `next_action`: Next step the orchestrator should execute
- `severity_distribution`: Aggregated counts across all dimensions
- `critical_files`: Files appearing in 3+ dimensions, with metadata
- `iterations[]`: Historical log for trend analysis (see the sketch below)
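
How an `iterations[]` entry might be appended during Phase 4; a sketch only, where `state` is the parsed review-state.json and the counts are supplied by the deep-dive results:

```javascript
// Illustrative Phase 4 bookkeeping for the iterations[] log.
function recordIteration(state, { analyzed, resolved, escalated, before, after }) {
  state.iterations.push({
    iteration: state.current_iteration,
    findings_analyzed: analyzed,       // finding IDs sent to deep-dive
    findings_resolved: resolved,
    findings_escalated: escalated,
    severity_change: { before, after },
    timestamp: new Date().toISOString()
  });
  state.severity_distribution = after; // carry re-assessed counts forward
  state.current_iteration += 1;
}
```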

### Review Progress JSON

**Purpose**: Real-time dashboard updates via polling

```json
{
  "review_id": "review-20250125-143022",
  "last_update": "2025-01-25T14:35:10Z",
  "phase": "parallel|aggregate|iterate|complete",
  "current_iteration": 1,
  "progress": {
    "parallel_review": {
      "total_dimensions": 7,
      "completed": 5,
      "in_progress": 2,
      "percent_complete": 71
    },
    "deep_dive": {
      "total_findings": 6,
      "analyzed": 2,
      "in_progress": 1,
      "percent_complete": 33
    }
  },
  "agent_status": [
    {
      "agent_type": "review-agent",
      "dimension": "security",
      "status": "completed",
      "started_at": "2025-01-25T14:30:00Z",
      "completed_at": "2025-01-25T15:15:00Z",
      "duration_ms": 2700000
    },
    {
      "agent_type": "deep-dive-agent",
      "finding_id": "sec-001-uuid",
      "status": "in_progress",
      "started_at": "2025-01-25T14:32:00Z"
    }
  ],
  "estimated_completion": "2025-01-25T16:00:00Z"
}
```
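
Because the dashboard polls this file, the same information is available from the shell; an illustrative loop using `jq` (the 2-second interval and session path are arbitrary):

```bash
PROGRESS=.workflow/active/WFS-payment-integration/.review/review-progress.json

# Poll the phase and per-stage completion every 2 seconds
while sleep 2; do
  jq -r '"\(.phase): reviews \(.progress.parallel_review.percent_complete)%, deep-dive \(.progress.deep_dive.percent_complete)%"' "$PROGRESS"
done
```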

### Agent Output Schemas

**Agent-produced JSON files follow standardized schemas**:

1. **Dimension Results** (cli-explore-agent output from parallel reviews)
   - Schema: `~/.claude/workflows/cli-templates/schemas/review-dimension-results-schema.json`
   - Output: `{output-dir}/dimensions/{dimension}.json`
   - Contains: findings array, summary statistics, cross_references

2. **Deep-Dive Results** (cli-explore-agent output from iterations)
   - Schema: `~/.claude/workflows/cli-templates/schemas/review-deep-dive-results-schema.json`
   - Output: `{output-dir}/iterations/iteration-{N}-finding-{uuid}.json`
   - Contains: root_cause, remediation_plan, impact_assessment, reassessed_severity

### Agent Invocation Template

**Review Agent** (parallel execution, 7 instances):

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description=`Execute ${dimension} review analysis via Deep Scan`,
  prompt=`
## Task Objective
Conduct comprehensive ${dimension} code exploration and analysis using Deep Scan mode (Bash + Gemini dual-source strategy) for the completed implementation in session ${sessionId}

## Analysis Mode Selection
Use **Deep Scan mode** for this review:
- Phase 1: Bash structural scan for standard patterns (classes, functions, imports)
- Phase 2: Gemini semantic analysis for design intent, non-standard patterns, ${dimension}-specific concerns
- Phase 3: Synthesis with attribution (bash-discovered vs gemini-discovered findings)

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Read session metadata: ${sessionMetadataPath}
2. Read completed task summaries: bash(find ${summariesDir} -name "IMPL-*.md" -type f)
3. Get changed files: bash(cd ${workflowDir} && git log --since="${sessionCreatedAt}" --name-only --pretty=format: | sort -u)
4. Read review state: ${reviewStateJsonPath}
5. Execute: cat ~/.claude/workflows/cli-templates/schemas/review-dimension-results-schema.json (get output schema reference)
6. Read: .workflow/project-tech.json (technology stack and architecture context)
7. Read: .workflow/project-guidelines.json (user-defined constraints and conventions to validate against)

## Session Context
- Session ID: ${sessionId}
- Review Dimension: ${dimension}
- Review ID: ${reviewId}
- Implementation Phase: Complete (all tests passing)
- Output Directory: ${outputDir}

## CLI Configuration
- Tool Priority: gemini → qwen → codex (fallback chain)
- Template: ~/.claude/workflows/cli-templates/prompts/analysis/${dimensionTemplate}
- Custom Focus: ${customFocus || 'Standard dimension analysis'}
- Timeout: ${timeout}ms
- Mode: analysis (READ-ONLY)

## Expected Deliverables

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 5; follow the schema exactly

1. Dimension Results JSON: ${outputDir}/dimensions/${dimension}.json

   **⚠️ CRITICAL JSON STRUCTURE REQUIREMENTS**:

   Root structure MUST be an array: \`[{ ... }]\` NOT \`{ ... }\`

   Required top-level fields:
   - dimension, review_id, analysis_timestamp (NOT timestamp/analyzed_at)
   - cli_tool_used (gemini|qwen|codex), model, analysis_duration_ms
   - summary (FLAT structure), findings, cross_references

   Summary MUST be FLAT (NOT nested by_severity):
   \`{ "total_findings": N, "critical": N, "high": N, "medium": N, "low": N, "files_analyzed": N, "lines_reviewed": N }\`

   Finding required fields:
   - id: format \`{dim}-{seq}-{uuid8}\` e.g., \`sec-001-a1b2c3d4\` (lowercase)
   - severity: lowercase only (critical|high|medium|low)
   - snippet (NOT code_snippet), impact (NOT exploit_scenario)
   - metadata, iteration (0), status (pending_remediation), cross_references

2. Analysis Report: ${outputDir}/reports/${dimension}-analysis.md
   - Human-readable summary with recommendations
   - Grouped by severity: critical → high → medium → low
   - Include file:line references for all findings

3. CLI Output Log: ${outputDir}/reports/${dimension}-cli-output.txt
   - Raw CLI tool output for debugging
   - Include the full analysis text

## Dimension-Specific Guidance
${getDimensionGuidance(dimension)}

## Success Criteria
- [ ] Schema obtained via cat review-dimension-results-schema.json
- [ ] All changed files analyzed for ${dimension} concerns
- [ ] All findings include file:line references with code snippets
- [ ] Severity assessment follows established criteria (see reference)
- [ ] Recommendations are actionable with code examples
- [ ] JSON output follows the schema exactly
- [ ] Report is comprehensive and well-organized
`
)
```

**Deep-Dive Agent** (iteration execution):

```javascript
Task(
  subagent_type="cli-explore-agent",
  run_in_background=false,
  description=`Deep-dive analysis for critical finding: ${findingTitle} via Dependency Map + Deep Scan`,
  prompt=`
## Task Objective
Perform focused root cause analysis using Dependency Map mode (for impact analysis) + Deep Scan mode (for semantic understanding) to generate a comprehensive remediation plan for a critical ${dimension} issue

## Analysis Mode Selection
Use **Dependency Map mode** first to understand dependencies:
- Build a dependency graph around ${file} to identify affected components
- Detect circular dependencies or tight coupling related to this finding
- Calculate change risk scores for remediation impact

Then apply **Deep Scan mode** for semantic analysis:
- Understand design intent and architectural context
- Identify non-standard patterns or implicit dependencies
- Extract remediation insights from code structure

## Finding Context
- Finding ID: ${findingId}
- Original Dimension: ${dimension}
- Title: ${findingTitle}
- File: ${file}:${line}
- Severity: ${severity}
- Category: ${category}
- Original Description: ${description}
- Iteration: ${iteration}

## MANDATORY FIRST STEPS (Execute by Agent)
**You (cli-explore-agent) MUST execute these steps in order:**
1. Read original finding: ${dimensionJsonPath}
2. Read affected file: ${file}
3. Identify related code: bash(grep -r "import.*${basename(file)}" ${workflowDir}/src --include="*.ts")
4. Read test files: bash(find ${workflowDir}/tests -name "*${basename(file, '.ts')}*" -type f)
5. Execute: cat ~/.claude/workflows/cli-templates/schemas/review-deep-dive-results-schema.json (get output schema reference)
6. Read: .workflow/project-tech.json (technology stack and architecture context)
7. Read: .workflow/project-guidelines.json (user-defined constraints for remediation compliance)

## CLI Configuration
- Tool Priority: gemini → qwen → codex
- Template: ~/.claude/workflows/cli-templates/prompts/analysis/01-diagnose-bug-root-cause.txt
- Timeout: 2400000ms (40 minutes)
- Mode: analysis (READ-ONLY)

## Expected Deliverables

**Schema Reference**: Schema obtained in MANDATORY FIRST STEPS step 5; follow the schema exactly

1. Deep-Dive Results JSON: ${outputDir}/iterations/iteration-${iteration}-finding-${findingId}.json

   **⚠️ CRITICAL JSON STRUCTURE REQUIREMENTS**:

   Root structure MUST be an array: \`[{ ... }]\` NOT \`{ ... }\`

   Required top-level fields:
   - finding_id, dimension, iteration, analysis_timestamp
   - cli_tool_used, model, analysis_duration_ms
   - original_finding, root_cause, remediation_plan
   - impact_assessment, reassessed_severity, confidence_score, cross_references

   All nested objects must follow the schema exactly - read the schema for field names

2. Analysis Report: ${outputDir}/reports/deep-dive-${iteration}-${findingId}.md
   - Detailed root cause analysis
   - Step-by-step remediation plan
   - Impact assessment and rollback strategy

## Success Criteria
- [ ] Schema obtained via cat review-deep-dive-results-schema.json
- [ ] Root cause clearly identified with supporting evidence
- [ ] Remediation plan is step-by-step actionable with exact file:line references
- [ ] Each step includes specific commands and validation tests
- [ ] Impact fully assessed (files, tests, breaking changes, dependencies)
- [ ] Severity re-evaluation justified with evidence
- [ ] Confidence score accurately reflects certainty of analysis
- [ ] JSON output follows the schema exactly
- [ ] References include project-specific and external documentation
`
)
```

### Dimension Guidance Reference

```javascript
function getDimensionGuidance(dimension) {
  const guidance = {
    security: `
Focus Areas:
- Input validation and sanitization
- Authentication and authorization mechanisms
- Data encryption (at-rest and in-transit)
- SQL/NoSQL injection vulnerabilities
- XSS, CSRF, and other web vulnerabilities
- Sensitive data exposure
- Access control and privilege escalation

Severity Criteria:
- Critical: Authentication bypass, SQL injection, RCE, sensitive data exposure
- High: Missing authorization checks, weak encryption, exposed secrets
- Medium: Missing input validation, insecure defaults, weak password policies
- Low: Security headers missing, verbose error messages, outdated dependencies
`,
    architecture: `
Focus Areas:
- Layering and separation of concerns
- Coupling and cohesion
- Design pattern adherence
- Dependency management
- Scalability and extensibility
- Module boundaries
- API design consistency

Severity Criteria:
- Critical: Circular dependencies, god objects, tight coupling across layers
- High: Violated architectural principles, scalability bottlenecks
- Medium: Missing abstractions, inconsistent patterns, suboptimal design
- Low: Minor coupling issues, documentation gaps, naming inconsistencies
`,
    quality: `
Focus Areas:
- Code duplication
- Complexity (cyclomatic, cognitive)
- Naming conventions
- Error handling patterns
- Code readability
- Comment quality
- Dead code

Severity Criteria:
- Critical: Severe complexity (CC > 20), massive duplication (>50 lines)
- High: High complexity (CC > 10), significant duplication, poor error handling
- Medium: Moderate complexity (CC > 5), naming issues, code smells
- Low: Minor duplication, documentation gaps, cosmetic issues
`,
    'action-items': `
Focus Areas:
- Requirements coverage verification
- Acceptance criteria met
- Documentation completeness
- Deployment readiness
- Missing functionality
- Test coverage gaps
- Configuration management

Severity Criteria:
- Critical: Core requirements not met, deployment blockers
- High: Significant functionality missing, acceptance criteria not met
- Medium: Minor requirements gaps, documentation incomplete
- Low: Nice-to-have features missing, minor documentation gaps
`,
    performance: `
Focus Areas:
- N+1 query problems
- Inefficient algorithms (O(n²) where O(n log n) possible)
- Memory leaks
- Blocking operations on main thread
- Missing caching opportunities
- Resource usage (CPU, memory, network)
- Database query optimization

Severity Criteria:
- Critical: Memory leaks, O(n²) in hot path, blocking main thread
- High: N+1 queries, missing indexes, inefficient algorithms
- Medium: Suboptimal caching, unnecessary computations, lazy loading issues
- Low: Minor optimization opportunities, redundant operations
`,
    maintainability: `
Focus Areas:
- Technical debt indicators
- Magic numbers and hardcoded values
- Long methods (>50 lines)
- Large classes (>500 lines)
- Dead code and commented code
- Code documentation
- Test coverage

Severity Criteria:
- Critical: Massive methods (>200 lines), severe technical debt blocking changes
- High: Large methods (>100 lines), significant dead code, undocumented complex logic
- Medium: Magic numbers, moderate technical debt, missing tests
- Low: Minor refactoring opportunities, cosmetic improvements
`,
    'best-practices': `
Focus Areas:
- Framework conventions adherence
- Language idioms
- Anti-patterns
- Deprecated API usage
- Coding standards compliance
- Error handling patterns
- Logging and monitoring

Severity Criteria:
- Critical: Severe anti-patterns, deprecated APIs with security risks
|
||||
- High: Major convention violations, poor error handling, missing logging
|
||||
- Medium: Minor anti-patterns, style inconsistencies, suboptimal patterns
|
||||
- Low: Cosmetic style issues, minor convention deviations
|
||||
`
|
||||
};
|
||||
|
||||
return guidance[dimension] || 'Standard code review analysis';
|
||||
}
|
||||
```
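As a usage illustration, the returned guidance text can be spliced directly into the deep-dive prompt; the variable names here are illustrative:

```javascript
// Inject dimension-specific guidance into an agent prompt (illustrative)
const dimension = 'security';
const guidanceSection = `## Dimension Guidance\n${getDimensionGuidance(dimension)}`;
```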

### Completion Conditions

**Full Success**:
- All dimensions reviewed
- Critical findings = 0
- High findings ≤ 5
- Action: Generate final report, mark phase=complete

**Partial Success**:
- All dimensions reviewed
- Max iterations reached
- Critical/high findings remain
- Action: Generate report with warnings, recommend follow-up
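A minimal sketch of how these conditions might be checked in orchestration code, assuming the aggregated findings are available as an array of objects with a `severity` field (names illustrative):

```javascript
// Evaluate the completion conditions above against aggregated findings
function evaluateCompletion(findings, iteration, maxIterations) {
  const count = sev => findings.filter(f => f.severity === sev).length;
  if (count('critical') === 0 && count('high') <= 5) return 'full-success';
  if (iteration >= maxIterations) return 'partial-success';
  return 'continue'; // keep iterating on remaining findings
}
```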

### Error Handling

**Phase-Level Error Matrix**:

| Phase | Error | Blocking? | Action |
|-------|-------|-----------|--------|
| Phase 1 | Session not found | Yes | Error and exit |
| Phase 1 | No completed tasks | Yes | Error and exit |
| Phase 1 | No changed files | Yes | Error and exit |
| Phase 2 | Single dimension fails | No | Log warning, continue other dimensions |
| Phase 2 | All dimensions fail | Yes | Error and exit |
| Phase 3 | Missing dimension JSON | No | Skip in aggregation, log warning |
| Phase 4 | Deep-dive agent fails | No | Skip finding, continue others |
| Phase 4 | Max iterations reached | No | Generate partial report |
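One way the matrix could be encoded in orchestration code - a sketch, with the error keys invented here purely for illustration:

```javascript
// Blocking errors (the "Yes" rows above) abort; all others log and continue
const BLOCKING_ERRORS = new Set([
  'session-not-found', 'no-completed-tasks', 'no-changed-files', // Phase 1
  'all-dimensions-fail'                                          // Phase 2
]);

function handlePhaseError(phase, errorKey, message) {
  if (BLOCKING_ERRORS.has(errorKey)) throw new Error(`${phase}: ${message}`);
  console.warn(`${phase}: ${message} - continuing with partial results`);
}
```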

**CLI Fallback Chain**: Gemini → Qwen → Codex → degraded mode

**Fallback Triggers**:
1. HTTP 429, 5xx errors, connection timeout
2. Invalid JSON output (parse error, missing required fields)
3. Low confidence (score < 0.4)
4. Analysis too brief (< 100 words in report)

**Fallback Behavior**:
- On trigger: Retry with next tool in chain
- After Codex fails: Enter degraded mode (skip analysis, log error)
- Degraded mode: Continue workflow with available results
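As a sketch, the chain and its triggers could be driven by a loop like the following; `runAnalysis` and `isAcceptable` are assumed helpers, not part of the documented API:

```javascript
// Try each tool in priority order; fall through to the next on any trigger
async function runWithFallback(prompt) {
  for (const tool of ['gemini', 'qwen', 'codex']) {
    try {
      const result = await runAnalysis(tool, prompt); // assumed helper
      if (isAcceptable(result)) return result;        // checks triggers 2-4
    } catch (err) {
      // trigger 1: HTTP 429 / 5xx / timeout - try the next tool
    }
  }
  return null; // degraded mode: caller logs the error and continues
}
```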

### TodoWrite Structure

```javascript
TodoWrite({
  todos: [
    { content: "Phase 1: Discovery & Initialization", status: "completed", activeForm: "Initializing" },
    { content: "Phase 2: Parallel Reviews (7 dimensions)", status: "in_progress", activeForm: "Reviewing" },
    { content: " → Security review", status: "in_progress", activeForm: "Analyzing security" },
    // ... other dimensions as sub-items
    { content: "Phase 3: Aggregation", status: "pending", activeForm: "Aggregating" },
    { content: "Phase 4: Deep-dive", status: "pending", activeForm: "Deep-diving" },
    { content: "Phase 5: Completion", status: "pending", activeForm: "Completing" }
  ]
});
```

## Best Practices

1. **Default Settings Work**: 7 dimensions + 3 iterations are sufficient for most cases
2. **Parallel Execution**: ~60 minutes for a full initial review (7 dimensions)
3. **Trust Aggregation Logic**: Auto-selection is based on proven heuristics
4. **Monitor Logs**: Check the reports/ directory for CLI analysis insights

## Related Commands

### View Review Progress
Use `ccw view` to open the review dashboard in the browser:

```bash
ccw view
```

### Automated Fix Workflow
After completing a review, use the generated findings JSON for automated fixing:

```bash
# Step 1: Complete review (this command)
/workflow:review-session-cycle

# Step 2: Run automated fixes using dimension findings
/workflow:review-cycle-fix .workflow/active/WFS-{session-id}/.review/
```

See `/workflow:review-cycle-fix` for automated fixing with smart grouping, parallel execution, and test verification.

@@ -1,7 +1,7 @@
---
name: review
description: Post-implementation review with specialized types (security/architecture/action-items/quality) using analysis agents and Gemini
argument-hint: "[--type=security|architecture|action-items|quality] [optional: session-id]"
argument-hint: "[--type=security|architecture|action-items|quality] [--archived] [optional: session-id]"
---

## Command Overview: /workflow:review

@@ -29,27 +29,70 @@ argument-hint: "[--type=security|architecture|action-items|quality] [optional: s
- For documentation generation, use `/workflow:tools:docs`
- For CLAUDE.md updates, use `/update-memory-related`

## Execution Process

```
Input Parsing:
├─ Parse --type flag (default: quality)
├─ Parse --archived flag (search in archives)
└─ Parse session-id argument (optional)

Step 1: Session Resolution
└─ Decision:
   ├─ session-id provided + --archived → Search .workflow/archives/
   ├─ session-id provided → Search .workflow/active/ first, then archives
   └─ Not provided → Auto-detect from .workflow/active/

Step 2: Validation
├─ Check session directory exists (active or archived)
└─ Check for completed implementation (.summaries/IMPL-*.md exists)

Step 3: Type Check
└─ Decision:
   ├─ type=docs → Redirect to /workflow:tools:docs
   └─ Other types → Continue to analysis

Step 4: Model Analysis Phase
├─ Load context (summaries, test results, changed files)
└─ Perform specialized review by type:
   ├─ security → Security patterns + Gemini analysis
   ├─ architecture → Qwen architecture analysis
   ├─ quality → Gemini code quality analysis
   └─ action-items → Requirements verification

Step 5: Generate Report
└─ Output: REVIEW-{type}.md
```

## Execution Template

```bash
#!/bin/bash
# Optional specialized review for completed implementation

# Step 1: Session ID resolution
# Step 1: Session ID resolution and location detection
if [ -n "$SESSION_ARG" ]; then
  sessionId="$SESSION_ARG"
else
  sessionId=$(find .workflow/ -name '.active-*' | head -1 | sed 's/.*active-//')
  sessionId=$(find .workflow/active/ -name "WFS-*" -type d | head -1 | xargs basename)
fi

# Step 2: Validation
if [ ! -d ".workflow/${sessionId}" ]; then
  echo "Session ${sessionId} not found"
# Step 2: Resolve session path (active or archived)
# Priority: --archived flag → active → archives
if [ -n "$ARCHIVED_FLAG" ]; then
  sessionPath=".workflow/archives/${sessionId}"
elif [ -d ".workflow/active/${sessionId}" ]; then
  sessionPath=".workflow/active/${sessionId}"
elif [ -d ".workflow/archives/${sessionId}" ]; then
  sessionPath=".workflow/archives/${sessionId}"
  echo "Note: Session found in archives, running review on archived session"
else
  echo "Session ${sessionId} not found in active or archives"
  exit 1
fi

# Check for completed tasks
if [ ! -d ".workflow/${sessionId}/.summaries" ] || [ -z "$(find .workflow/${sessionId}/.summaries/ -name "IMPL-*.md" -type f 2>/dev/null)" ]; then
if [ ! -d "${sessionPath}/.summaries" ] || [ -z "$(find ${sessionPath}/.summaries/ -name "IMPL-*.md" -type f 2>/dev/null)" ]; then
  echo "No completed implementation found. Complete implementation first"
  exit 1
fi

@@ -79,14 +122,18 @@ After bash validation, the model takes control to:

1. **Load Context**: Read completed task summaries and changed files
```bash
# Load implementation summaries
cat .workflow/${sessionId}/.summaries/IMPL-*.md
# Load implementation summaries (iterate through .summaries/ directory)
for summary in ${sessionPath}/.summaries/*.md; do
  cat "$summary"
done

# Load test results (if available)
cat .workflow/${sessionId}/.summaries/TEST-FIX-*.md 2>/dev/null
for test_summary in ${sessionPath}/.summaries/TEST-FIX-*.md; do
  [ -f "$test_summary" ] && cat "$test_summary"
done

# Get changed files
git log --since="$(cat .workflow/${sessionId}/workflow-session.json | jq -r .created_at)" --name-only --pretty=format: | sort -u
git log --since="$(cat ${sessionPath}/workflow-session.json | jq -r .created_at)" --name-only --pretty=format: | sort -u
```

2. **Perform Specialized Review**: Based on `review_type`

@@ -99,54 +146,56 @@
```
- Use Gemini for security analysis:
```bash
cd .workflow/${sessionId} && gemini -p "
ccw cli -p "
PURPOSE: Security audit of completed implementation
TASK: Review code for security vulnerabilities, insecure patterns, auth/authz issues
CONTEXT: @.summaries/IMPL-*.md,../.. @../../CLAUDE.md
CONTEXT: @.summaries/IMPL-*.md,../.. @../../project-tech.json @../../project-guidelines.json
EXPECTED: Security findings report with severity levels
RULES: Focus on OWASP Top 10, authentication, authorization, data validation, injection risks
" --approval-mode yolo
" --tool gemini --mode write --cd ${sessionPath}
```

**Architecture Review** (`--type=architecture`):
- Use Qwen for architecture analysis:
```bash
cd .workflow/${sessionId} && qwen -p "
ccw cli -p "
PURPOSE: Architecture compliance review
TASK: Evaluate adherence to architectural patterns, identify technical debt, review design decisions
CONTEXT: @.summaries/IMPL-*.md,../.. @../../CLAUDE.md
CONTEXT: @.summaries/IMPL-*.md,../.. @../../project-tech.json @../../project-guidelines.json
EXPECTED: Architecture assessment with recommendations
RULES: Check for patterns, separation of concerns, modularity, scalability
" --approval-mode yolo
" --tool qwen --mode write --cd ${sessionPath}
```

**Quality Review** (`--type=quality`):
- Use Gemini for code quality:
```bash
cd .workflow/${sessionId} && gemini -p "
ccw cli -p "
PURPOSE: Code quality and best practices review
TASK: Assess code readability, maintainability, adherence to best practices
CONTEXT: @.summaries/IMPL-*.md,../.. @../../CLAUDE.md
CONTEXT: @.summaries/IMPL-*.md,../.. @../../project-tech.json @../../project-guidelines.json
EXPECTED: Quality assessment with improvement suggestions
RULES: Check for code smells, duplication, complexity, naming conventions
" --approval-mode yolo
" --tool gemini --mode write --cd ${sessionPath}
```

**Action Items Review** (`--type=action-items`):
- Verify all requirements and acceptance criteria met:
```bash
# Load task requirements and acceptance criteria
find .workflow/${sessionId}/.task -name "IMPL-*.json" -exec jq -r '
  "Task: " + .id + "\n" +
  "Requirements: " + (.context.requirements | join(", ")) + "\n" +
  "Acceptance: " + (.context.acceptance | join(", "))
' {} \;
for task_file in ${sessionPath}/.task/*.json; do
  cat "$task_file" | jq -r '
    "Task: " + .id + "\n" +
    "Requirements: " + (.context.requirements | join(", ")) + "\n" +
    "Acceptance: " + (.context.acceptance | join(", "))
  '
done

# Check implementation summaries against requirements
cd .workflow/${sessionId} && gemini -p "
ccw cli -p "
PURPOSE: Verify all requirements and acceptance criteria are met
TASK: Cross-check implementation summaries against original requirements
CONTEXT: @.task/IMPL-*.json,.summaries/IMPL-*.md,../.. @../../CLAUDE.md
CONTEXT: @.task/IMPL-*.json,.summaries/IMPL-*.md,../.. @../../project-tech.json @../../project-guidelines.json
EXPECTED:
- Requirements coverage matrix
- Acceptance criteria verification

@@ -157,7 +206,7 @@ After bash validation, the model takes control to:
- Verify all acceptance criteria are met
- Flag any incomplete or missing action items
- Assess deployment readiness
" --approval-mode yolo
" --tool gemini --mode write --cd ${sessionPath}
```

@@ -195,7 +244,7 @@ After bash validation, the model takes control to:
4. **Output Files**:
```bash
# Save review report
Write(.workflow/${sessionId}/REVIEW-${review_type}.md)
Write(${sessionPath}/REVIEW-${review_type}.md)

# Update session metadata
# (optional) Update workflow-session.json with review status

@@ -222,6 +271,12 @@ After bash validation, the model takes control to:
# Architecture review for specific session
/workflow:review --type=architecture WFS-payment-integration

# Review an archived session (auto-detects if not in active)
/workflow:review --type=security WFS-old-feature

# Explicitly review archived session
/workflow:review --archived --type=quality WFS-completed-feature

# Documentation review
/workflow:review --type=docs
```

@@ -231,6 +286,7 @@ After bash validation, the model takes control to:
- **Simple Validation**: Check session exists and has completed tasks
- **No Complex Orchestration**: Direct analysis, no multi-phase pipeline
- **Specialized Reviews**: Different prompts and tools for different review types
- **Archived Session Support**: Review archived sessions with `--archived` flag or auto-detection
- **MCP Integration**: Fast code search for security and architecture patterns
- **CLI Tool Integration**: Gemini for analysis, Qwen for architecture
- **Structured Output**: Markdown reports with severity levels and action items

@@ -256,3 +312,11 @@ Optional Review (when needed):
- Regular development (tests are sufficient)
- Simple bug fixes (test-fix-agent handles it)
- Minor changes (update-memory-related is enough)

## Post-Review Action

After review completion, prompt user:
```
Review complete. Would you like to complete and archive this session?
→ Run /workflow:session:complete to archive with lessons learned
```

@@ -1,147 +1,203 @@
---
name: complete
description: Mark active workflow session as complete, archive with lessons learned, update manifest, remove active flag
argument-hint: "[-y|--yes] [--detailed]"
examples:
  - /workflow:session:complete
  - /workflow:session:complete --yes
  - /workflow:session:complete --detailed
---

# Complete Workflow Session (/workflow:session:complete)

## Overview
Mark the currently active workflow session as complete, analyze it for lessons learned, move it to the archive directory, and remove the active flag marker.
Mark the currently active workflow session as complete, archive it, and update manifests.

## Usage
```bash
/workflow:session:complete            # Complete current active session
/workflow:session:complete --detailed # Show detailed completion summary
```

## Implementation Flow

### Phase 1: Prepare for Archival (Minimal Manual Operations)

**Purpose**: Find active session, move to archive location, pass control to agent. Minimal operations.

#### Step 1.1: Find Active Session and Get Name
```bash
# Find active marker
bash(find .workflow/ -name ".active-*" -type f | head -1)

# Extract session name from marker path
bash(basename .workflow/.active-WFS-session-name | sed 's/^\.active-//')
```
**Output**: Session name `WFS-session-name`

#### Step 1.2: Move Session to Archive
```bash
# Create archive directory if needed
bash(mkdir -p .workflow/.archives/)

# Move session to archive location
bash(mv .workflow/WFS-session-name .workflow/.archives/WFS-session-name)
```
**Result**: Session now at `.workflow/.archives/WFS-session-name/`

### Phase 2: Agent-Orchestrated Completion (All Data Processing)

**Purpose**: Agent analyzes archived session, generates metadata, updates manifest, and removes active marker.

#### Agent Invocation

Invoke `universal-executor` agent to complete the archival process.

**Agent Task**:
```
Task(
  subagent_type="universal-executor",
  description="Complete session archival",
  prompt=`
Complete workflow session archival. Session already moved to archive location.

## Context
- Session: .workflow/.archives/WFS-session-name/
- Active marker: .workflow/.active-WFS-session-name

## Tasks

1. **Extract session data** from workflow-session.json (session_id, description/topic, started_at/timestamp, completed_at, status)
   - If status != "completed", update it with timestamp

2. **Count files**: tasks (.task/*.json) and summaries (.summaries/*.md)

3. **Generate lessons**: Use gemini with ~/.claude/workflows/cli-templates/prompts/archive/analysis-simple.txt (fallback: analyze files directly)
   - Return: {successes, challenges, watch_patterns}

4. **Build archive entry**:
   - Calculate: duration_hours, success_rate, tags (3-5 keywords)
   - Construct complete JSON with session_id, description, archived_at, archive_path, metrics, tags, lessons

5. **Update manifest**: Initialize .workflow/.archives/manifest.json if needed, append entry

6. **Remove active marker**

7. **Return result**: {"status": "success", "session_id": "...", "archived_at": "...", "metrics": {...}, "lessons_summary": {...}}

## Error Handling
- On failure: return {"status": "error", "task": "...", "message": "..."}
- Do NOT remove marker if failed
`
)
```

**Expected Output**:
- Agent returns JSON result confirming successful archival
- Display completion summary to user based on agent response

## Workflow Execution Strategy

### Two-Phase Approach (Optimized)

**Phase 1: Minimal Manual Setup** (2 simple operations)
- Find active session and extract name
- Move session to archive location
- **No data extraction** - agent handles all data processing
- **No counting** - agent does this from archive location
- **Total**: 2 bash commands (find + move)

**Phase 2: Agent-Driven Completion** (1 agent invocation)
- Extract all session data from archived location
- Count tasks and summaries
- Generate lessons learned analysis
- Build complete archive metadata
- Update manifest
- Remove active marker
- Return success/error result

## Quick Commands
## Pre-defined Commands

```bash
# Phase 1: Find and move
bash(find .workflow/ -name ".active-*" -type f | head -1)
bash(basename .workflow/.active-WFS-session-name | sed 's/^\.active-//')
bash(mkdir -p .workflow/.archives/)
bash(mv .workflow/WFS-session-name .workflow/.archives/WFS-session-name)
# Phase 1: Find active session
SESSION_PATH=$(find .workflow/active/ -maxdepth 1 -name "WFS-*" -type d | head -1)
SESSION_ID=$(basename "$SESSION_PATH")

# Phase 2: Agent completes archival
Task(subagent_type="universal-executor", description="Complete session archival", prompt=`...`)
# Phase 3: Move to archive
mkdir -p .workflow/archives/
mv .workflow/active/$SESSION_ID .workflow/archives/$SESSION_ID

# Cleanup marker
rm -f .workflow/archives/$SESSION_ID/.archiving
```

## Archive Query Commands
## Key Files to Read

After archival, you can query the manifest:
**For manifest.json generation**, read ONLY these files:

| File | Extract |
|------|---------|
| `$SESSION_PATH/workflow-session.json` | session_id, description, started_at, status |
| `$SESSION_PATH/IMPL_PLAN.md` | title (first # heading), description (first paragraph) |
| `$SESSION_PATH/.tasks/*.json` | count files |
| `$SESSION_PATH/.summaries/*.md` | count files |
| `$SESSION_PATH/.review/dimensions/*.json` | count + findings summary (optional) |

## Execution Flow

### Phase 1: Find Session (2 commands)

```bash
# List all archived sessions
jq '.archives[].session_id' .workflow/.archives/manifest.json
# 1. Find and extract session
SESSION_PATH=$(find .workflow/active/ -maxdepth 1 -name "WFS-*" -type d | head -1)
SESSION_ID=$(basename "$SESSION_PATH")

# Find sessions by keyword
jq '.archives[] | select(.description | test("auth"; "i"))' .workflow/.archives/manifest.json

# Get specific session details
jq '.archives[] | select(.session_id == "WFS-user-auth")' .workflow/.archives/manifest.json

# List all watch patterns across sessions
jq '.archives[].lessons.watch_patterns[]' .workflow/.archives/manifest.json
# 2. Check/create archiving marker
test -f "$SESSION_PATH/.archiving" && echo "RESUMING" || touch "$SESSION_PATH/.archiving"
```

**Output**: `SESSION_ID` = e.g., `WFS-auth-feature`

### Phase 2: Generate Manifest Entry (Read-only)

Read the key files above, then build this structure:

```json
{
  "session_id": "<from workflow-session.json>",
  "description": "<from workflow-session.json>",
  "archived_at": "<current ISO timestamp>",
  "archive_path": ".workflow/archives/<SESSION_ID>",
  "metrics": {
    "duration_hours": "<(completed_at - started_at) / 3600000>",
    "tasks_completed": "<count .tasks/*.json>",
    "summaries_generated": "<count .summaries/*.md>",
    "review_metrics": {
      "dimensions_analyzed": "<count .review/dimensions/*.json>",
      "total_findings": "<sum from dimension JSONs>"
    }
  },
  "tags": ["<3-5 keywords from IMPL_PLAN.md>"],
  "lessons": {
    "successes": ["<key wins>"],
    "challenges": ["<difficulties>"],
    "watch_patterns": ["<patterns to monitor>"]
  }
}
```
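A sketch of how the computed fields might be derived; `session` is assumed to hold the parsed workflow-session.json, and `taskCount`, `summaryCount`, `tags`, and `lessons` are assumed gathered earlier (all names illustrative):

```javascript
// Derive manifest metrics; ISO timestamps parse to milliseconds
const durationHours =
  (Date.parse(session.completed_at) - Date.parse(session.started_at)) / 3600000;

const archiveEntry = {
  session_id: session.session_id,
  description: session.description,
  archived_at: new Date().toISOString(),
  archive_path: `.workflow/archives/${session.session_id}`,
  metrics: {
    duration_hours: durationHours,
    tasks_completed: taskCount,        // count of .tasks/*.json
    summaries_generated: summaryCount  // count of .summaries/*.md
  },
  tags,     // 3-5 keywords from IMPL_PLAN.md
  lessons   // { successes, challenges, watch_patterns }
};
```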

**Lessons Generation**: Use gemini with `~/.claude/workflows/cli-templates/prompts/archive/analysis-simple.txt`

### Phase 3: Atomic Commit (4 commands)

```bash
# 1. Create archive directory
mkdir -p .workflow/archives/

# 2. Move session
mv .workflow/active/$SESSION_ID .workflow/archives/$SESSION_ID

# 3. Update manifest.json (Read → Append → Write)
# Read: .workflow/archives/manifest.json (or [])
# Append: archive_entry from Phase 2
# Write: updated JSON

# 4. Remove marker
rm -f .workflow/archives/$SESSION_ID/.archiving
```
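The Read → Append → Write step in the block above could look like this in Node - a sketch assuming the `archiveEntry` built in Phase 2 is in scope and the manifest uses an `archives` array:

```javascript
// Read (or initialize) the manifest, append the new entry, write it back
const fs = require('fs');
const manifestPath = '.workflow/archives/manifest.json';
const manifest = fs.existsSync(manifestPath)
  ? JSON.parse(fs.readFileSync(manifestPath, 'utf8'))
  : { archives: [] };
manifest.archives.push(archiveEntry);
fs.writeFileSync(manifestPath, JSON.stringify(manifest, null, 2));
```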

**Output**:
```
✓ Session "$SESSION_ID" archived successfully
Location: .workflow/archives/$SESSION_ID/
Manifest: Updated with N total sessions
```

### Phase 4: Update project-tech.json (Optional)

**Skip if**: `.workflow/project-tech.json` doesn't exist

```bash
# Check
test -f .workflow/project-tech.json || echo "SKIP"
```

**If exists**, add feature entry:

```json
{
  "id": "<slugified title>",
  "title": "<from IMPL_PLAN.md>",
  "status": "completed",
  "tags": ["<from Phase 2>"],
  "timeline": { "implemented_at": "<date>" },
  "traceability": { "session_id": "<SESSION_ID>", "archive_path": "<path>" }
}
```
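For the `<slugified title>` id, a common slugification approach is sketched below; the command itself does not prescribe one:

```javascript
// Turn an IMPL_PLAN.md title into a feature id, e.g. "User Auth!" -> "user-auth"
const slugify = title =>
  title.toLowerCase().trim()
    .replace(/[^a-z0-9]+/g, '-')  // collapse non-alphanumerics into hyphens
    .replace(/^-+|-+$/g, '');     // strip leading/trailing hyphens
```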

**Output**:
```
✓ Feature added to project registry
```

### Phase 5: Ask About Solidify (Always)

After successful archival, prompt user to capture learnings:

```javascript
// Parse --yes flag
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')

if (autoYes) {
  // Auto mode: Skip solidify
  console.log(`[--yes] Auto-selecting: Skip solidify`)
  console.log(`Session archived successfully.`)
  // Done - no solidify
} else {
  // Interactive mode: Ask user
  AskUserQuestion({
    questions: [{
      question: "Would you like to solidify learnings from this session into project guidelines?",
      header: "Solidify",
      options: [
        { label: "Yes, solidify now", description: "Extract learnings and update project-guidelines.json" },
        { label: "Skip", description: "Archive complete, no learnings to capture" }
      ],
      multiSelect: false
    }]
  })

  // **If "Yes, solidify now"**: Execute `/workflow:session:solidify` with the archived session ID.
}
```

## Auto Mode Defaults

When `--yes` or `-y` flag is used:
- **Solidify Learnings**: Auto-selected "Skip" (archive only, no solidify)

**Flag Parsing**:
```javascript
const autoYes = $ARGUMENTS.includes('--yes') || $ARGUMENTS.includes('-y')
```

**Output**:
```
Session archived successfully.
→ Run /workflow:session:solidify to capture learnings (recommended)
```

## Error Recovery

| Phase | Symptom | Recovery |
|-------|---------|----------|
| 1 | No active session | `No active session found` |
| 2 | Analysis fails | Remove marker: `rm $SESSION_PATH/.archiving`, retry |
| 3 | Move fails | Session safe in active/, fix issue, retry |
| 3 | Manifest fails | Session in archives/, manually add entry, remove marker |

## Quick Reference

```
Phase 1: find session → create .archiving marker
Phase 2: read key files → build manifest entry (no writes)
Phase 3: mkdir → mv → update manifest.json → rm marker
Phase 4: update project-tech.json features array (optional)
Phase 5: ask user → solidify learnings (optional)
```

@@ -19,35 +19,35 @@ Display all workflow sessions with their current status, progress, and metadata.

### Step 1: Find All Sessions
```bash
ls .workflow/WFS-* 2>/dev/null
ls .workflow/active/WFS-* 2>/dev/null
```

### Step 2: Check Active Session
```bash
ls .workflow/.active-* 2>/dev/null | head -1
find .workflow/active/ -name "WFS-*" -type d 2>/dev/null | head -1
```

### Step 3: Read Session Metadata
```bash
jq -r '.session_id, .status, .project' .workflow/WFS-session/workflow-session.json
jq -r '.session_id, .status, .project' .workflow/active/WFS-session/workflow-session.json
```

### Step 4: Count Task Progress
```bash
find .workflow/WFS-session/.task/ -name "*.json" -type f 2>/dev/null | wc -l
find .workflow/WFS-session/.summaries/ -name "*.md" -type f 2>/dev/null | wc -l
find .workflow/active/WFS-session/.task/ -name "*.json" -type f 2>/dev/null | wc -l
find .workflow/active/WFS-session/.summaries/ -name "*.md" -type f 2>/dev/null | wc -l
```

### Step 5: Get Creation Time
```bash
jq -r '.created_at // "unknown"' .workflow/WFS-session/workflow-session.json
jq -r '.created_at // "unknown"' .workflow/active/WFS-session/workflow-session.json
```

## Simple Bash Commands

### Basic Operations
- **List sessions**: `find .workflow/ -maxdepth 1 -type d -name "WFS-*"`
- **Find active**: `find .workflow/ -name ".active-*" -type f`
- **List sessions**: `find .workflow/active/ -name "WFS-*" -type d`
- **Find active**: `find .workflow/active/ -name "WFS-*" -type d`
- **Read session data**: `jq -r '.session_id, .status' session.json`
- **Count tasks**: `find .task/ -name "*.json" -type f | wc -l`
- **Count completed**: `find .summaries/ -name "*.md" -type f 2>/dev/null | wc -l`

@@ -89,11 +89,8 @@ Total: 3 sessions (1 active, 1 paused, 1 completed)

### Quick Commands
```bash
# Count all sessions
ls .workflow/WFS-* | wc -l

# Show only active
ls .workflow/.active-* | basename | sed 's/^\.active-//'
ls .workflow/active/WFS-* | wc -l

# Show recent sessions
ls -t .workflow/WFS-*/workflow-session.json | head -3
ls -t .workflow/active/WFS-*/workflow-session.json | head -3
```

@@ -17,45 +17,39 @@ Resume the most recently paused workflow session, restoring all context and stat

### Step 1: Find Paused Sessions
```bash
ls .workflow/WFS-* 2>/dev/null
ls .workflow/active/WFS-* 2>/dev/null
```

### Step 2: Check Session Status
```bash
jq -r '.status' .workflow/WFS-session/workflow-session.json
jq -r '.status' .workflow/active/WFS-session/workflow-session.json
```

### Step 3: Find Most Recent Paused
```bash
ls -t .workflow/WFS-*/workflow-session.json | head -1
ls -t .workflow/active/WFS-*/workflow-session.json | head -1
```

### Step 4: Update Session Status
```bash
jq '.status = "active"' .workflow/WFS-session/workflow-session.json > temp.json
mv temp.json .workflow/WFS-session/workflow-session.json
jq '.status = "active"' .workflow/active/WFS-session/workflow-session.json > temp.json
mv temp.json .workflow/active/WFS-session/workflow-session.json
```

### Step 5: Add Resume Timestamp
```bash
jq '.resumed_at = "'$(date -u +%Y-%m-%dT%H:%M:%SZ)'"' .workflow/WFS-session/workflow-session.json > temp.json
mv temp.json .workflow/WFS-session/workflow-session.json
```

### Step 6: Create Active Marker
```bash
touch .workflow/.active-WFS-session-name
jq '.resumed_at = "'$(date -u +%Y-%m-%dT%H:%M:%SZ)'"' .workflow/active/WFS-session/workflow-session.json > temp.json
mv temp.json .workflow/active/WFS-session/workflow-session.json
```

## Simple Bash Commands

### Basic Operations
- **List sessions**: `ls .workflow/WFS-*`
- **List sessions**: `ls .workflow/active/WFS-*`
- **Check status**: `jq -r '.status' session.json`
- **Find recent**: `ls -t .workflow/*/workflow-session.json | head -1`
- **Find recent**: `ls -t .workflow/active/*/workflow-session.json | head -1`
- **Update status**: `jq '.status = "active"' session.json > temp.json`
- **Add timestamp**: `jq '.resumed_at = "'$(date -u +%Y-%m-%dT%H:%M:%SZ)'"'`
- **Create marker**: `touch .workflow/.active-session`

### Resume Result
```