diff --git a/.gitignore b/.gitignore index 40674f75af0..5360182e6c1 100644 --- a/.gitignore +++ b/.gitignore @@ -90,3 +90,8 @@ ENV/ # Rope project settings .ropeproject +.ccls-cache +/bin +/bin-* +.pytest_cache +test/test_analytics/actualdump diff --git a/Pipfile b/Pipfile index 98efd4b5ae2..fa79d4c2768 100644 --- a/Pipfile +++ b/Pipfile @@ -17,6 +17,12 @@ pytest = "*" pydot = "*" ipykernel = "*" matplotlib = "*" +antlr4-python3-runtime = "*" +python-jsonrpc-server = "==0.0.2" +pytest-asyncio = "*" +aenum = "*" +pytest-cov = "*" +sphinx = "*" [dev-packages] diff --git a/Pipfile.lock b/Pipfile.lock index a1ed5eb9a33..e77ee5639da 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "c903c65267c881c1458b0a6878c64ac1f9a50e46711ae1bcfadc28f77242e056" + "sha256": "95e712caa2eee3aa33a2a0cfbdbe2805543febfd2429fbded1fea743ecb65cc2" }, "pipfile-spec": 6, "requires": { @@ -16,6 +16,29 @@ ] }, "default": { + "aenum": { + "hashes": [ + "sha256:3df9b84cce5dc9ed77c337079f97b66c44c0053eb87d6f4d46b888dc45801e38", + "sha256:7a77c205c4bc9d7fe9bd73b3193002d724aebf5909fa0d297534208953891ec8", + "sha256:a3208e4b28db3a7b232ff69b934aef2ea1bf27286d9978e1e597d46f490e4687" + ], + "index": "pypi", + "version": "==2.1.2" + }, + "alabaster": { + "hashes": [ + "sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359", + "sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02" + ], + "version": "==0.7.12" + }, + "antlr4-python3-runtime": { + "hashes": [ + "sha256:168cdcec8fb9152e84a87ca6fd261b3d54c8f6358f42ab3b813b14a7193bb50b" + ], + "index": "pypi", + "version": "==4.7.2" + }, "appnope": { "hashes": [ "sha256:5b26757dc6f79a3b7dc9fab95359328d5747fcb2409d331ea66d0272b90ab2a0", @@ -26,11 +49,10 @@ }, "atomicwrites": { "hashes": [ - "sha256:0312ad34fcad8fac3704d441f7b317e50af620823353ec657a53e981f92920c0", - "sha256:ec9ae8adaae229e4f8446952d204a3e4b5fdd2d099f9be3aaf556120135fb3ee" + 
"sha256:03472c30eb2c5d1ba9227e4c2ca66ab8287fbfbbda3888aa93dc2e28fc6811b4", + "sha256:75a9445bac02d8d058d5e1fe689654ba5a6556a1dfd8ce6ec55a0ed79866cfa6" ], - "markers": "python_version != '3.2.*' and python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.1.*' and python_version != '3.0.*'", - "version": "==1.2.1" + "version": "==1.3.0" }, "attrs": { "hashes": [ @@ -39,6 +61,13 @@ ], "version": "==18.2.0" }, + "babel": { + "hashes": [ + "sha256:6778d85147d5d85345c14a26aada5e478ab04e39b078b0745ee6870c2b5cf669", + "sha256:8cba50f48c529ca3fa18cf81fa9403be176d374ac4d60738b839122dfaaa3d23" + ], + "version": "==2.6.0" + }, "backcall": { "hashes": [ "sha256:38ecd85be2c1e78f77fd91700c76e14667dc21e2713b63876c0eb901196e01e4", @@ -48,10 +77,10 @@ }, "certifi": { "hashes": [ - "sha256:376690d6f16d32f9d1fe8932551d80b23e9d393a8578c5633a2ed39a64861638", - "sha256:456048c7e371c089d0a77a5212fb37a2c2dce1e24146e3b7e0261736aaeaa22a" + "sha256:47f9c83ef4c0c621eaef743f133f09fa8a74a9b75f037e8624f83bd1b6626cb7", + "sha256:993f830721089fef441cdfeb4b2c8c9df86f0c63239f06bd025a76a7daddb033" ], - "version": "==2018.8.24" + "version": "==2018.11.29" }, "chardet": { "hashes": [ @@ -60,6 +89,42 @@ ], "version": "==3.0.4" }, + "coverage": { + "hashes": [ + "sha256:09e47c529ff77bf042ecfe858fb55c3e3eb97aac2c87f0349ab5a7efd6b3939f", + "sha256:0a1f9b0eb3aa15c990c328535655847b3420231af299386cfe5efc98f9c250fe", + "sha256:0cc941b37b8c2ececfed341444a456912e740ecf515d560de58b9a76562d966d", + "sha256:10e8af18d1315de936d67775d3a814cc81d0747a1a0312d84e27ae5610e313b0", + "sha256:1b4276550b86caa60606bd3572b52769860a81a70754a54acc8ba789ce74d607", + "sha256:1e8a2627c48266c7b813975335cfdea58c706fe36f607c97d9392e61502dc79d", + "sha256:2b224052bfd801beb7478b03e8a66f3f25ea56ea488922e98903914ac9ac930b", + "sha256:447c450a093766744ab53bf1e7063ec82866f27bcb4f4c907da25ad293bba7e3", + "sha256:46101fc20c6f6568561cdd15a54018bb42980954b79aa46da8ae6f008066a30e", + 
"sha256:4710dc676bb4b779c4361b54eb308bc84d64a2fa3d78e5f7228921eccce5d815", + "sha256:510986f9a280cd05189b42eee2b69fecdf5bf9651d4cd315ea21d24a964a3c36", + "sha256:5535dda5739257effef56e49a1c51c71f1d37a6e5607bb25a5eee507c59580d1", + "sha256:5a7524042014642b39b1fcae85fb37556c200e64ec90824ae9ecf7b667ccfc14", + "sha256:5f55028169ef85e1fa8e4b8b1b91c0b3b0fa3297c4fb22990d46ff01d22c2d6c", + "sha256:6694d5573e7790a0e8d3d177d7a416ca5f5c150742ee703f3c18df76260de794", + "sha256:6831e1ac20ac52634da606b658b0b2712d26984999c9d93f0c6e59fe62ca741b", + "sha256:77f0d9fa5e10d03aa4528436e33423bfa3718b86c646615f04616294c935f840", + "sha256:828ad813c7cdc2e71dcf141912c685bfe4b548c0e6d9540db6418b807c345ddd", + "sha256:85a06c61598b14b015d4df233d249cd5abfa61084ef5b9f64a48e997fd829a82", + "sha256:8cb4febad0f0b26c6f62e1628f2053954ad2c555d67660f28dfb1b0496711952", + "sha256:a5c58664b23b248b16b96253880b2868fb34358911400a7ba39d7f6399935389", + "sha256:aaa0f296e503cda4bc07566f592cd7a28779d433f3a23c48082af425d6d5a78f", + "sha256:ab235d9fe64833f12d1334d29b558aacedfbca2356dfb9691f2d0d38a8a7bfb4", + "sha256:b3b0c8f660fae65eac74fbf003f3103769b90012ae7a460863010539bb7a80da", + "sha256:bab8e6d510d2ea0f1d14f12642e3f35cefa47a9b2e4c7cea1852b52bc9c49647", + "sha256:c45297bbdbc8bb79b02cf41417d63352b70bcb76f1bbb1ee7d47b3e89e42f95d", + "sha256:d19bca47c8a01b92640c614a9147b081a1974f69168ecd494687c827109e8f42", + "sha256:d64b4340a0c488a9e79b66ec9f9d77d02b99b772c8b8afd46c1294c1d39ca478", + "sha256:da969da069a82bbb5300b59161d8d7c8d423bc4ccd3b410a9b4d8932aeefc14b", + "sha256:ed02c7539705696ecb7dc9d476d861f3904a8d2b7e894bd418994920935d36bb", + "sha256:ee5b8abc35b549012e03a7b1e86c09491457dba6c94112a2482b18589cc2bdb9" + ], + "version": "==4.5.2" + }, "cycler": { "hashes": [ "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d", @@ -69,10 +134,18 @@ }, "decorator": { "hashes": [ - "sha256:2c51dff8ef3c447388fe5e4453d24a2bf128d3a4c32af3fabef1f01c6851ab82", - 
"sha256:c39efa13fbdeb4506c476c9b3babf6a718da943dab7811c206005a4a956c080c" + "sha256:33cd704aea07b4c28b3eb2c97d288a06918275dac0ecebdaf1bc8a48d98adb9e", + "sha256:cabb249f4710888a2fc0e13e9a16c343d932033718ff62e1e9bc93a9d3a9122b" ], - "version": "==4.3.0" + "version": "==4.3.2" + }, + "docutils": { + "hashes": [ + "sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6", + "sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274", + "sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6" + ], + "version": "==0.14" }, "et-xmlfile": { "hashes": [ @@ -80,12 +153,18 @@ ], "version": "==1.0.1" }, + "future": { + "hashes": [ + "sha256:67045236dcfd6816dc439556d009594abf643e5eb48992e36beac09c2ca659b8" + ], + "version": "==0.17.1" + }, "gitdb2": { "hashes": [ - "sha256:87783b7f4a8f6b71c7fe81d32179b3c8781c1a7d6fa0c69bff2f315b00aff4f8", - "sha256:bb4c85b8a58531c51373c89f92163b92f30f81369605a67cd52d1fc21246c044" + "sha256:83361131a1836661a155172932a13c08bda2db3674e4caa32368aa6eb02f38c2", + "sha256:e3a0141c5f2a3f635c7209d56c496ebe1ad35da82fe4d3ec4aaa36278d70648a" ], - "version": "==2.0.4" + "version": "==2.0.5" }, "gitpython": { "hashes": [ @@ -97,25 +176,32 @@ }, "idna": { "hashes": [ - "sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e", - "sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16" + "sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407", + "sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c" + ], + "version": "==2.8" + }, + "imagesize": { + "hashes": [ + "sha256:3f349de3eb99145973fefb7dbe38554414e5c30abd0c8e4b970a7c9d09f3a1d8", + "sha256:f3832918bc3c66617f92e35f5d70729187676313caa60c187eb0f28b8fe5e3b5" ], - "version": "==2.7" + "version": "==1.1.0" }, "ipykernel": { "hashes": [ - "sha256:3e0ffdf545c0bf80d9dab6523ec6829831408c474772487aeb6eb9f0348b6a1e", - "sha256:7cd5e90bc882c13f9c5e76330cb5242280e293cbe9f1a622508762124a103a82" + 
"sha256:0aeb7ec277ac42cc2b59ae3d08b10909b2ec161dc6908096210527162b53675d", + "sha256:0fc0bf97920d454102168ec2008620066878848fcfca06c22b669696212e292f" ], "index": "pypi", - "version": "==5.0.0" + "version": "==5.1.0" }, "ipython": { "hashes": [ - "sha256:47b17ea874454a5c2eacc2732b04a750d260b01ba479323155ac8a39031f5535", - "sha256:9fed506c3772c875a3048bc134a25e6f5e997b1569b2636f6a5d891f34cbfd46" + "sha256:06de667a9e406924f97781bda22d5d76bfb39762b678762d86a466e63f65dc39", + "sha256:5d3e020a6b5f29df037555e5c45ab1088d6a7cf3bd84f47e0ba501eeb0c3ec82" ], - "version": "==7.0.1" + "version": "==7.3.0" }, "ipython-genutils": { "hashes": [ @@ -133,17 +219,24 @@ }, "jedi": { "hashes": [ - "sha256:0191c447165f798e6a730285f2eee783fff81b0d3df261945ecb80983b5c3ca7", - "sha256:b7493f73a2febe0dc33d51c99b474547f7f6c0b2c8fb2b21f453eef204c12148" + "sha256:2bb0603e3506f708e792c7f4ad8fc2a7a9d9c2d292a358fbbd58da531695595b", + "sha256:2c6bcd9545c7d6440951b12b44d373479bf18123a401a52025cf98563fbd826c" + ], + "version": "==0.13.3" + }, + "jinja2": { + "hashes": [ + "sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd", + "sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4" ], - "version": "==0.13.1" + "version": "==2.10" }, "jupyter-client": { "hashes": [ - "sha256:27befcf0446b01e29853014d6a902dd101ad7d7f94e2252b1adca17c3466b761", - "sha256:59e6d791e22a8002ad0e80b78c6fd6deecab4f9e1b1aa1a22f4213de271b29ea" + "sha256:b5f9cb06105c1d2d30719db5ffb3ea67da60919fb68deaefa583deccd8813551", + "sha256:c44411eb1463ed77548bc2d5ec0d744c9b81c4a542d9637c7a52824e2121b987" ], - "version": "==5.2.3" + "version": "==5.2.4" }, "jupyter-core": { "hashes": [ @@ -183,71 +276,99 @@ "sha256:efabbcd4f406b532206b8801058c8bab9e79645b9880329253ae3322b7b02cd5", "sha256:f923406e6b32c86309261b8195e24e18b6a8801df0cfc7814ac44017bfcb3939" ], - "markers": "python_version >= '2.7' and python_version != '3.0.*' and python_version != '3.2.*' and python_version != '3.3.*' and python_version 
!= '3.1.*'", "version": "==1.0.1" }, "lxml": { "hashes": [ - "sha256:02bc220d61f46e9b9d5a53c361ef95e9f5e1d27171cd461dddb17677ae2289a5", - "sha256:22f253b542a342755f6cfc047fe4d3a296515cf9b542bc6e261af45a80b8caf6", - "sha256:2f31145c7ff665b330919bfa44aacd3a0211a76ca7e7b441039d2a0b0451e415", - "sha256:36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f", - "sha256:438a1b0203545521f6616132bfe0f4bca86f8a401364008b30e2b26ec408ce85", - "sha256:4815892904c336bbaf73dafd54f45f69f4021c22b5bad7332176bbf4fb830568", - "sha256:5be031b0f15ad63910d8e5038b489d95a79929513b3634ad4babf77100602588", - "sha256:5c93ae37c3c588e829b037fdfbd64a6e40c901d3f93f7beed6d724c44829a3ad", - "sha256:60842230678674cdac4a1cf0f707ef12d75b9a4fc4a565add4f710b5fcf185d5", - "sha256:62939a8bb6758d1bf923aa1c13f0bcfa9bf5b2fc0f5fa917a6e25db5fe0cfa4e", - "sha256:75830c06a62fe7b8fe3bbb5f269f0b308f19f3949ac81cfd40062f47c1455faf", - "sha256:81992565b74332c7c1aff6a913a3e906771aa81c9d0c68c68113cffcae45bc53", - "sha256:8c892fb0ee52c594d9a7751c7d7356056a9682674b92cc1c4dc968ff0f30c52f", - "sha256:9d862e3cf4fc1f2837dedce9c42269c8c76d027e49820a548ac89fdcee1e361f", - "sha256:a623965c086a6e91bb703d4da62dabe59fe88888e82c4117d544e11fd74835d6", - "sha256:a7783ab7f6a508b0510490cef9f857b763d796ba7476d9703f89722928d1e113", - "sha256:aab09fbe8abfa3b9ce62aaf45aca2d28726b1b9ee44871dbe644050a2fff4940", - "sha256:abf181934ac3ef193832fb973fd7f6149b5c531903c2ec0f1220941d73eee601", - "sha256:ae07fa0c115733fce1e9da96a3ac3fa24801742ca17e917e0c79d63a01eeb843", - "sha256:b9c78242219f674ab645ec571c9a95d70f381319a23911941cd2358a8e0521cf", - "sha256:bccb267678b870d9782c3b44d0cefe3ba0e329f9af8c946d32bf3778e7a4f271", - "sha256:c4df4d27f4c93b2cef74579f00b1d3a31a929c7d8023f870c4b476f03a274db4", - "sha256:caf0e50b546bb60dfa99bb18dfa6748458a83131ecdceaf5c071d74907e7e78a", - "sha256:d3266bd3ac59ac4edcd5fa75165dee80b94a3e5c91049df5f7c057ccf097551c", - "sha256:db0d213987bcd4e6d41710fb4532b22315b0d8fb439ff901782234456556aed1", - 
"sha256:dbbd5cf7690a40a9f0a9325ab480d0fccf46d16b378eefc08e195d84299bfae1", - "sha256:e16e07a0ec3a75b5ee61f2b1003c35696738f937dc8148fbda9fe2147ccb6e61", - "sha256:e175a006725c7faadbe69e791877d09936c0ef2cf49d01b60a6c1efcb0e8be6f", - "sha256:edd9c13a97f6550f9da2236126bb51c092b3b1ce6187f2bd966533ad794bbb5e", - "sha256:fa39ea60d527fbdd94215b5e5552f1c6a912624521093f1384a491a8ad89ad8b" + "sha256:0358b9e9642bc7d39aac5cffe9884a99a5ca68e5e2c1b89e570ed60da9139908", + "sha256:091a359c4dafebbecd3959d9013f1b896b5371859165e4e50b01607a98d9e3e2", + "sha256:1998e4e60603c64bcc35af61b4331ab3af087457900d3980e18d190e17c3a697", + "sha256:2000b4088dee9a41f459fddaf6609bba48a435ce6374bb254c5ccdaa8928c5ba", + "sha256:2afb0064780d8aaf165875be5898c1866766e56175714fa5f9d055433e92d41d", + "sha256:2d8f1d9334a4e3ff176d096c14ded3100547d73440683567d85b8842a53180bb", + "sha256:2e38db22f6a3199fd63675e1b4bd795d676d906869047398f29f38ca55cb453a", + "sha256:3181f84649c1a1ca62b19ddf28436b1b2cb05ae6c7d2628f33872e713994c364", + "sha256:37462170dfd88af8431d04de6b236e6e9c06cda71e2ca26d88ef2332fd2a5237", + "sha256:3a9d8521c89bf6f2a929c3d12ad3ad7392c774c327ea809fd08a13be6b3bc05f", + "sha256:3d0bbd2e1a28b4429f24fd63a122a450ce9edb7a8063d070790092d7343a1aa4", + "sha256:483d60585ce3ee71929cea70949059f83850fa5e12deb9c094ed1c8c2ec73cbd", + "sha256:4888be27d5cba55ce94209baef5bcd7bbd7314a3d17021a5fc10000b3a5f737d", + "sha256:64b0d62e4209170a2a0c404c446ab83b941a0003e96604d2e4f4cb735f8a2254", + "sha256:68010900898fdf139ac08549c4dba8206c584070a960ffc530aebf0c6f2794ef", + "sha256:872ecb066de602a0099db98bd9e57f4cfc1d62f6093d94460c787737aa08f39e", + "sha256:88a32b03f2e4cd0e63f154cac76724709f40b3fc2f30139eb5d6f900521b44ed", + "sha256:b1dc7683da4e67ab2bebf266afa68098d681ae02ce570f0d1117312273d2b2ac", + "sha256:b29e27ce9371810250cb1528a771d047a9c7b0f79630dc7dc5815ff828f4273b", + "sha256:ce197559596370d985f1ce6b7051b52126849d8159040293bf8b98cb2b3e1f78", + "sha256:d45cf6daaf22584eff2175f48f82c4aa24d8e72a44913c5aff801819bb73d11f", 
+ "sha256:e2ff9496322b2ce947ba4a7a5eb048158de9d6f3fe9efce29f1e8dd6878561e6", + "sha256:f7b979518ec1f294a41a707c007d54d0f3b3e1fd15d5b26b7e99b62b10d9a72e", + "sha256:f9c7268e9d16e34e50f8246c4f24cf7353764affd2bc971f0379514c246e3f6b", + "sha256:f9c839806089d79de588ee1dde2dae05dc1156d3355dfeb2b51fde84d9c960ad", + "sha256:ff962953e2389226adc4d355e34a98b0b800984399153c6678f2367b11b4d4b8" ], "index": "pypi", - "version": "==4.2.5" + "version": "==4.3.2" + }, + "markupsafe": { + "hashes": [ + "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473", + "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161", + "sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235", + "sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5", + "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff", + "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b", + "sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1", + "sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e", + "sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183", + "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66", + "sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1", + "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1", + "sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e", + "sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b", + "sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905", + "sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735", + "sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d", + "sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e", + "sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d", + 
"sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c", + "sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21", + "sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2", + "sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5", + "sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b", + "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6", + "sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f", + "sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f", + "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7" + ], + "version": "==1.1.1" }, "matplotlib": { "hashes": [ - "sha256:0f738b57051e8a0f8bc8282031d0a82e9dedbd10a94fd54d4c3830d708607a8b", - "sha256:0f87d188528ff3c86286603bc13170a5932e631c0c69d9995aae86448a7d9692", - "sha256:290864f3c69d1e71d6648c9c75093db28486f1bf058b0ab2fda9d2d6814ddf19", - "sha256:70aba3a1c7dbef5a997db8afe06e256c6b67e1dc15bb16d8b55d140ea5375a8d", - "sha256:7318d11a4784c3e37f5de0c9141d18eac08565d303da7d3d557662369f2f866b", - "sha256:77c6edc4e25b36430df8a445195030abc8d5766d068b9aeed1a58a684cc0eb3b", - "sha256:91669d38938ae7b66db084e444ee5dceed09b59a6622fda10dfb021d5ce6d0dc", - "sha256:a547edc4d0ce68f3eb397ed8701314f254a0de593045ee0eecad4f1efc664951", - "sha256:b4e2333c98a7c2c1ff6eb930cd2b57d4b818de5437c5048802096b32f66e65f9", - "sha256:c99b3908e76de5d1582e6941dc34de086eb38d18539520f4ae4ffa29b8f2644f", - "sha256:e3acc990b3672132a670b23cc055b967d0aa04183dbc5be82a38a0426ee6d1a6", - "sha256:eaa8f8248c20eacfade26faf749e248adc1bec1edc2d08b05916297cc76a72bd", - "sha256:ede6d9676c43844e4994b041ffca08dd157ce171190a8ccb40fed9b377db5653" + "sha256:1ae6549976b6ceb6ee426272a28c0fc9715b3e3669694d560c8f661c5b39e2c5", + "sha256:4d4250bf508dd07cca3b43888097f873cadb66eec6ac63dbbfb798798ec07af2", + "sha256:53af2e01d7f1700ed2b64a9091bc865360c9c4032f625451c4589a826854c787", + 
"sha256:63e498067d32d627111cd1162cae1621f1221f9d4c6a9745dd7233f29de581b6", + "sha256:7169a34971e398dd58e87e173f97366fd88a3fa80852704530433eb224a8ca57", + "sha256:91c54d6bb9eeaaff965656c5ea6cbdcbf780bad8462ac99b30b451548194746f", + "sha256:aeef177647bb3fccfe09065481989d7dfc5ac59e9367d6a00a3481062cf651e4", + "sha256:cf8ae10559a78aee0409ede1e9d4fda03895433eeafe609dd9ed67e45f552db0", + "sha256:d51d0889d1c4d51c51a9822265c0494ea3e70a52bdd88358e0863daca46fa23a", + "sha256:de5ccd3500247f85fe4f9fad90f80a8bd397e4f110a4c33fabf95f07403e8372", + "sha256:e1d33589e32f482d0a7d1957bf473d43341115d40d33f578dad44432e47df7b7", + "sha256:e8d1939262aa6b36d0c51f50a50a43a04b9618d20db31e6c0192b1463067aeef", + "sha256:e918d51b1fda82a65fdf52d2f3914b2246481cc2a9cd10e223e6be6078916ff3" ], "index": "pypi", - "version": "==3.0.0" + "version": "==3.0.3" }, "more-itertools": { "hashes": [ - "sha256:c187a73da93e7a8acc0001572aebc7e3c69daf7bf6881a2cea10650bd4420092", - "sha256:c476b5d3a34e12d40130bc2f935028b5f636df8f372dc2c1c01dc19681b2039e", - "sha256:fcbfeaea0be121980e15bc97b3817b5202ca73d0eae185b4550cbfce2a3ebb3d" + "sha256:0125e8f60e9e031347105eb1682cef932f5e97d7b9a1a28d9bf00c22a5daef40", + "sha256:590044e3942351a1bdb1de960b739ff4ce277960f2425ad4509446dbace8d9d1" ], - "version": "==4.3.0" + "markers": "python_version > '2.7'", + "version": "==6.0.0" }, "networkx": { "hashes": [ @@ -258,58 +379,60 @@ }, "nltk": { "hashes": [ - "sha256:fe0eda251be65843be86d7de9abfbf7161732256f742e623b21243ec47bdb718" + "sha256:286f6797204ffdb52525a1d21ec0a221ec68b8e3fa4f2d25f412ac8e63c70e8d" ], "index": "pypi", - "version": "==3.3.0" + "version": "==3.4" }, "numpy": { "hashes": [ - "sha256:1b1cf8f7300cf7b11ddb4250b3898c711a6187df05341b5b7153db23ffe5d498", - "sha256:27a0d018f608a3fe34ac5e2b876f4c23c47e38295c47dd0775cc294cd2614bc1", - "sha256:3fde172e28c899580d32dc21cb6d4a1225d62362f61050b654545c662eac215a", - "sha256:497d7c86df4f85eb03b7f58a7dd0f8b948b1f582e77629341f624ba301b4d204", - 
"sha256:4e28e66cf80c09a628ae680efeb0aa9a066eb4bb7db2a5669024c5b034891576", - "sha256:58be95faf0ca2d886b5b337e7cba2923e3ad1224b806a91223ea39f1e0c77d03", - "sha256:5b4dfb6551eaeaf532054e2c6ef4b19c449c2e3a709ebdde6392acb1372ecabc", - "sha256:63f833a7c622e9082df3cbaf03b4fd92d7e0c11e2f9d87cb57dbf0e84441964b", - "sha256:71bf3b7ca15b1967bba3a1ef6a8e87286382a8b5e46ac76b42a02fe787c5237d", - "sha256:733dc5d47e71236263837825b69c975bc08728ae638452b34aeb1d6fa347b780", - "sha256:82f00a1e2695a0e5b89879aa25ea614530b8ebdca6d49d4834843d498e8a5e92", - "sha256:866bf72b9c3bfabe4476d866c70ee1714ad3e2f7b7048bb934892335e7b6b1f7", - "sha256:8aeac8b08f4b8c52129518efcd93706bb6d506ccd17830b67d18d0227cf32d9e", - "sha256:8d2cfb0aef7ec8759736cce26946efa084cdf49797712333539ef7d135e0295e", - "sha256:981224224bbf44d95278eb37996162e8beb6f144d2719b144e86dfe2fce6c510", - "sha256:981daff58fa3985a26daa4faa2b726c4e7a1d45178100125c0e1fdaf2ac64978", - "sha256:9ad36dbfdbb0cba90a08e7343fadf86f43cf6d87450e8d2b5d71d7c7202907e4", - "sha256:a251570bb3cb04f1627f23c234ad09af0e54fc8194e026cf46178f2e5748d647", - "sha256:b5ff7dae352fd9e1edddad1348698e9fea14064460a7e39121ef9526745802e6", - "sha256:c898f9cca806102fcacb6309899743aa39efb2ad2a302f4c319f54db9f05cd84", - "sha256:cf4b970042ce148ad8dce4369c02a4078b382dadf20067ce2629c239d76460d1", - "sha256:d1569013e8cc8f37e9769d19effdd85e404c976cd0ca28a94e3ddc026c216ae8", - "sha256:dca261e85fe0d34b2c242ecb31c9ab693509af2cf955d9caf01ee3ef3669abd0", - "sha256:ec8bf53ef7c92c99340972519adbe122e82c81d5b87cbd955c74ba8a8cd2a4ad", - "sha256:f2e55726a9ee2e8129d6ce6abb466304868051bcc7a09d652b3b07cd86e801a2", - "sha256:f4dee74f2626c783a3804df9191e9008946a104d5a284e52427a53ff576423cb", - "sha256:f592fd7fe1f20b5041928cce1330937eca62f9058cb41e69c2c2d83cffc0d1e3", - "sha256:ffab5b80bba8c86251291b8ce2e6c99a61446459d4c6637f5d5cc8c9ce37c972" + "sha256:1980f8d84548d74921685f68096911585fee393975f53797614b34d4f409b6da", + "sha256:22752cd809272671b273bb86df0f505f505a12368a3a5fc0aa811c7ece4dfd5c", 
+ "sha256:23cc40313036cffd5d1873ef3ce2e949bdee0646c5d6f375bf7ee4f368db2511", + "sha256:2b0b118ff547fecabc247a2668f48f48b3b1f7d63676ebc5be7352a5fd9e85a5", + "sha256:3a0bd1edf64f6a911427b608a894111f9fcdb25284f724016f34a84c9a3a6ea9", + "sha256:3f25f6c7b0d000017e5ac55977a3999b0b1a74491eacb3c1aa716f0e01f6dcd1", + "sha256:4061c79ac2230594a7419151028e808239450e676c39e58302ad296232e3c2e8", + "sha256:560ceaa24f971ab37dede7ba030fc5d8fa173305d94365f814d9523ffd5d5916", + "sha256:62be044cd58da2a947b7e7b2252a10b42920df9520fc3d39f5c4c70d5460b8ba", + "sha256:6c692e3879dde0b67a9dc78f9bfb6f61c666b4562fd8619632d7043fb5b691b0", + "sha256:6f65e37b5a331df950ef6ff03bd4136b3c0bbcf44d4b8e99135d68a537711b5a", + "sha256:7a78cc4ddb253a55971115f8320a7ce28fd23a065fc33166d601f51760eecfa9", + "sha256:80a41edf64a3626e729a62df7dd278474fc1726836552b67a8c6396fd7e86760", + "sha256:893f4d75255f25a7b8516feb5766c6b63c54780323b9bd4bc51cdd7efc943c73", + "sha256:972ea92f9c1b54cc1c1a3d8508e326c0114aaf0f34996772a30f3f52b73b942f", + "sha256:9f1d4865436f794accdabadc57a8395bd3faa755449b4f65b88b7df65ae05f89", + "sha256:9f4cd7832b35e736b739be03b55875706c8c3e5fe334a06210f1a61e5c2c8ca5", + "sha256:adab43bf657488300d3aeeb8030d7f024fcc86e3a9b8848741ea2ea903e56610", + "sha256:bd2834d496ba9b1bdda3a6cf3de4dc0d4a0e7be306335940402ec95132ad063d", + "sha256:d20c0360940f30003a23c0adae2fe50a0a04f3e48dc05c298493b51fd6280197", + "sha256:d3b3ed87061d2314ff3659bb73896e622252da52558f2380f12c421fbdee3d89", + "sha256:dc235bf29a406dfda5790d01b998a1c01d7d37f449128c0b1b7d1c89a84fae8b", + "sha256:fb3c83554f39f48f3fa3123b9c24aecf681b1c289f9334f8215c1d3c8e2f6e5b" ], "index": "pypi", - "version": "==1.15.2" + "version": "==1.16.2" }, "openpyxl": { "hashes": [ - "sha256:22904d7bdfaaab33d65d50a0915a65eeb2f29c85d9ec53081563850678a29927" + "sha256:70da6b45a5925285b6a3d93570b45f4402eb2d335740163a58eef533b139565c" ], "index": "pypi", - "version": "==2.5.8" + "version": "==2.6.0" + }, + "packaging": { + "hashes": [ + 
"sha256:0c98a5d0be38ed775798ece1b9727178c4469d9c3b4ada66e8e6b7849f8732af", + "sha256:9e1cbf8c12b1f1ce0bb5344b8d7ecf66a6f8a6e91bcb0c84593ed6d3ab5c4ab3" + ], + "version": "==19.0" }, "parso": { "hashes": [ - "sha256:35704a43a3c113cce4de228ddb39aab374b8004f4f2407d070b6a2ca784ce8a2", - "sha256:895c63e93b94ac1e1690f5fdd40b65f07c8171e3e53cbd7793b5b96c0e0a7f24" + "sha256:4580328ae3f548b358f4901e38c0578229186835f0fa0846e47369796dd5bcc9", + "sha256:68406ebd7eafe17f8e40e15a84b56848eccbf27d7c1feb89e93d8fca395706db" ], - "version": "==0.3.1" + "version": "==0.3.4" }, "pexpect": { "hashes": [ @@ -328,19 +451,18 @@ }, "pluggy": { "hashes": [ - "sha256:6e3836e39f4d36ae72840833db137f7b7d35105079aee6ec4a62d9f80d594dd1", - "sha256:95eb8364a4708392bae89035f45341871286a333f749c3141c20573d2b3876e1" + "sha256:19ecf9ce9db2fce065a7a0586e07cfb4ac8614fe96edf628a264b1c70116cf8f", + "sha256:84d306a647cc805219916e62aab89caa97a33a1dd8c342e87a37f91073cd4746" ], - "markers": "python_version != '3.2.*' and python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.1.*' and python_version != '3.0.*'", - "version": "==0.7.1" + "version": "==0.9.0" }, "prompt-toolkit": { "hashes": [ - "sha256:5eff0c9fd652384ecfe730bbcdf3658868725c6928fbf608d9338834d7a974b6", - "sha256:81da9ecf6ca6806a549697529af8ec3ac5b739c13ac14607218e650db1b53131", - "sha256:c67c1c264d8a0d9e1070e9272bacee00f76c81daab7bc4bf09ff991bd1e224a7" + "sha256:11adf3389a996a6d45cc277580d0d53e8a5afd281d0c9ec71b28e6f121463780", + "sha256:2519ad1d8038fd5fc8e770362237ad0364d16a7650fb5724af6997ed5515e3c1", + "sha256:977c6583ae813a37dc1c2e1b715892461fcbdaa57f6fc62f33a528c4886c8f55" ], - "version": "==2.0.5" + "version": "==2.0.9" }, "ptyprocess": { "hashes": [ @@ -351,168 +473,196 @@ }, "py": { "hashes": [ - "sha256:06a30435d058473046be836d3fc4f27167fd84c45b99704f2fb5509ef61f9af1", - "sha256:50402e9d1c9005d759426988a492e0edaadb7f4e68bcddfea586bc7432d009c6" + 
"sha256:64f65755aee5b381cea27766a3a147c3f15b9b6b9ac88676de66ba2ae36793fa", + "sha256:dc639b046a6e2cff5bbe40194ad65936d6ba360b52b3c3fe1d08a82dd50b5e53" ], - "markers": "python_version != '3.2.*' and python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.1.*' and python_version != '3.0.*'", - "version": "==1.6.0" + "version": "==1.8.0" }, "pydot": { "hashes": [ - "sha256:92d2e2d15531d00710f2d6fb5540d2acabc5399d464f2f20d5d21073af241eb6" + "sha256:67be714300c78fda5fd52f79ec994039e3f76f074948c67b5ff539b433ad354f", + "sha256:d49c9d4dd1913beec2a997f831543c8cbd53e535b1a739e921642fe416235f01" ], "index": "pypi", - "version": "==1.2.4" + "version": "==1.4.1" }, "pygments": { "hashes": [ - "sha256:78f3f434bcc5d6ee09020f92ba487f95ba50f1e3ef83ae96b9d5ffa1bab25c5d", - "sha256:dbae1046def0efb574852fab9e90209b23f556367b5a320c0bcb871c77c3e8cc" + "sha256:5ffada19f6203563680669ee7f53b64dabbeb100eb51b61996085e99c03b284a", + "sha256:e8218dd399a61674745138520d0d4cf2621d7e032439341bc3f647bff125818d" ], - "version": "==2.2.0" + "version": "==2.3.1" }, "pyparsing": { "hashes": [ - "sha256:bc6c7146b91af3f567cf6daeaec360bc07d45ffec4cf5353f4d7a208ce7ca30a", - "sha256:d29593d8ebe7b57d6967b62494f8c72b03ac0262b1eed63826c6f788b3606401" + "sha256:66c9268862641abcac4a96ba74506e594c884e3f57690a696d21ad8210ed667a", + "sha256:f6c5ef0d7480ad048c054c37632c67fca55299990fff127850181659eea33fc3" ], - "markers": "python_version != '3.2.*' and python_version != '3.1.*' and python_version >= '2.6' and python_version != '3.0.*'", - "version": "==2.2.2" + "version": "==2.3.1" }, "pytest": { "hashes": [ - "sha256:7e258ee50338f4e46957f9e09a0f10fb1c2d05493fa901d113a8dafd0790de4e", - "sha256:9332147e9af2dcf46cd7ceb14d5acadb6564744ddff1fe8c17f0ce60ece7d9a2" + "sha256:067a1d4bf827ffdd56ad21bd46674703fce77c5957f6c1eef731f6146bfcef1c", + "sha256:9687049d53695ad45cf5fdc7bbd51f0c49f1ea3ecfc4b7f3fde7501b541f17f4" + ], + "index": "pypi", + "version": "==4.3.0" + }, + "pytest-asyncio": { + "hashes": 
[ + "sha256:9fac5100fd716cbecf6ef89233e8590a4ad61d729d1732e0a96b84182df1daaf", + "sha256:d734718e25cfc32d2bf78d346e99d33724deeba774cc4afdf491530c6184b63b" + ], + "index": "pypi", + "version": "==0.10.0" + }, + "pytest-cov": { + "hashes": [ + "sha256:0ab664b25c6aa9716cbf203b17ddb301932383046082c081b9848a0edf5add33", + "sha256:230ef817450ab0699c6cc3c9c8f7a829c34674456f2ed8df1fe1d39780f7c87f" ], "index": "pypi", - "version": "==3.8.2" + "version": "==2.6.1" }, "python-dateutil": { "hashes": [ - "sha256:1adb80e7a782c12e52ef9a8182bebeb73f1d7e24e374397af06fb4956c8dc5c0", - "sha256:e27001de32f627c22380a688bcc43ce83504a7bc5da472209b4c70f02829f0b8" + "sha256:7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb", + "sha256:c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e" ], - "version": "==2.7.3" + "version": "==2.8.0" + }, + "python-jsonrpc-server": { + "hashes": [ + "sha256:533434fa982eb42c36ddb0b6758cef8e6eaf46d014f76b70a401b8790a3e6d57" + ], + "index": "pypi", + "version": "==0.0.2" + }, + "pytz": { + "hashes": [ + "sha256:32b0891edff07e28efe91284ed9c31e123d84bea3fd98e1f72be2508f43ef8d9", + "sha256:d5f05e487007e29e03409f9398d074e158d920d36eb82eaf66fb1136b0c5374c" + ], + "version": "==2018.9" }, "pyzmq": { "hashes": [ - "sha256:25a0715c8f69cf72f67cfe5a68a3f3ed391c67c063d2257bec0fe7fc2c7f08f8", - "sha256:2bab63759632c6b9e0d5bf19cc63c3b01df267d660e0abcf230cf0afaa966349", - "sha256:30ab49d99b24bf0908ebe1cdfa421720bfab6f93174e4883075b7ff38cc555ba", - "sha256:32c7ca9fc547a91e3c26fc6080b6982e46e79819e706eb414dd78f635a65d946", - "sha256:41219ae72b3cc86d97557fe5b1ef5d1adc1057292ec597b50050874a970a39cf", - "sha256:4b8c48a9a13cea8f1f16622f9bd46127108af14cd26150461e3eab71e0de3e46", - "sha256:55724997b4a929c0d01b43c95051318e26ddbae23565018e138ae2dc60187e59", - "sha256:65f0a4afae59d4fc0aad54a917ab599162613a761b760ba167d66cc646ac3786", - "sha256:6f88591a8b246f5c285ee6ce5c1bf4f6bd8464b7f090b1333a446b6240a68d40", - 
"sha256:75022a4c60dcd8765bb9ca32f6de75a0ec83b0d96e0309dc479f4c7b21f26cb7", - "sha256:76ea493bfab18dcb090d825f3662b5612e2def73dffc196d51a5194b0294a81d", - "sha256:7b60c045b80709e4e3c085bab9b691e71761b44c2b42dbb047b8b498e7bc16b3", - "sha256:8e6af2f736734aef8ed6f278f9f552ec7f37b1a6b98e59b887484a840757f67d", - "sha256:9ac2298e486524331e26390eac14e4627effd3f8e001d4266ed9d8f1d2d31cce", - "sha256:9ba650f493a9bc1f24feca1d90fce0e5dd41088a252ac9840131dfbdbf3815ca", - "sha256:a02a4a385e394e46012dc83d2e8fd6523f039bb52997c1c34a2e0dd49ed839c1", - "sha256:a3ceee84114d9f5711fa0f4db9c652af0e4636c89eabc9b7f03a3882569dd1ed", - "sha256:a72b82ac1910f2cf61a49139f4974f994984475f771b0faa730839607eeedddf", - "sha256:ab136ac51027e7c484c53138a0fab4a8a51e80d05162eb7b1585583bcfdbad27", - "sha256:c095b224300bcac61e6c445e27f9046981b1ac20d891b2f1714da89d34c637c8", - "sha256:c5cc52d16c06dc2521340d69adda78a8e1031705924e103c0eb8fc8af861d810", - "sha256:d612e9833a89e8177f8c1dc68d7b4ff98d3186cd331acd616b01bbdab67d3a7b", - "sha256:e828376a23c66c6fe90dcea24b4b72cd774f555a6ee94081670872918df87a19", - "sha256:e9767c7ab2eb552796440168d5c6e23a99ecaade08dda16266d43ad461730192", - "sha256:ebf8b800d42d217e4710d1582b0c8bff20cdcb4faad7c7213e52644034300924" - ], - "markers": "python_version != '3.0*' and python_version != '3.2*' and python_version >= '2.7' and python_version != '3.1*'", - "version": "==17.1.2" + "sha256:07a03450418694fb07e76a0191b6bc9f411afc8e364ca2062edcf28bb0e51c63", + "sha256:15f0bf7cd80020f165635595e197603aedb37fddf4164ad5ae226afc43242f7b", + "sha256:1756dc72e192c670490e38c788c3a35f901adc74ee436e5131d5a3e85fdd7dc6", + "sha256:1d1eb490da54679d724b08ef3ee04530849023670c4ba7e400ed2cdf906720c4", + "sha256:228402625796821f08706f58cc42a3c51c9897d723550babaefe4feec2b6dacc", + "sha256:264ac9dcee6a7af2bce4b61f2d19e5926118a5caa629b50f107ef6318670a364", + "sha256:2b5a43da65f5dec857184d5c2ce13b80071019e96358f146bdecff7238765bc9", + 
"sha256:3928534fa00a2aabfcfdb439c08ba37fbe99ab0cf57776c8db8d2b73a51693ba", + "sha256:3d2a295b1086d450981f73d3561ac204a0cc9c8ded386a4a34327d918f3b1d0a", + "sha256:411def5b4cbe6111856040a55c8048df113882e90c57ce88de4a48f0189441ac", + "sha256:4b77e96a7ffc1c5e08eaf274db554f227b31717d086adca1bb42b12ef35a7194", + "sha256:4c87fa3e449e1f4ab9170cdfe8213dc0ba34a11b160e6adecafa892e451a29b6", + "sha256:4fd8621a309db6ec23ef1369f43cdf7a9b0dc217d8ff9ca4095a6e932b379bda", + "sha256:54fe55a1694ffe608c8e4c5183e83cab7a91f3e5c84bd6f188868d6676c12aba", + "sha256:60acabd86808a16a895a247fd2bf7a127284a33562d79687bb5df163cff068b2", + "sha256:618887be4ad754228c0cbba7631f6574608b4430fe93974e6322324f1304fdac", + "sha256:69130efb6efa936de601cb135a8a4eec1caccd4ea2b784237145ff4075c2d3ae", + "sha256:6e7f78eeac82140bde7e60e975c6e6b1b678a4dd377782ab63319c1c78bf3aa1", + "sha256:6ee760cdb84e43574da6b3f2f1fc1251e8acf87253900d28a06451c5f5de39e9", + "sha256:75c87f1dc1e65cea4b709f2ebc78fa18d4b475e41463502aec9cd26208b88e0f", + "sha256:97cb1b7cd2c46e87b0a26651eccd2bbb8c758035efd1635ebb81ac36aa76a88c", + "sha256:abfa774dbadacc849121ed92eae05189d226daab583388b499472e1bbb17ef69", + "sha256:ae3d2627d74195ddc95675f2f814aca998381b73dc4341b9e10e3e191e1bdb0b", + "sha256:b30c339eb58355f51f4f54dd61d785f1ff58c86bca1c3a5916977631d121867b", + "sha256:cbabdced5b137cd56aa22633f13ac5690029a0ad43ab6c05f53206e489178362" + ], + "version": "==18.0.0" }, "requests": { "hashes": [ - "sha256:63b52e3c866428a224f97cab011de738c36aec0185aa91cfacd418b5d58911d1", - "sha256:ec22d826a36ed72a7358ff3fe56cbd4ba69dd7a6718ffd450ff0e9df7a47ce6a" + "sha256:502a824f31acdacb3a35b6690b5fbf0bc41d63a24a45c4004352b0242707598e", + "sha256:7bf2a778576d825600030a110f3c0e3e8edc51dfaafe1c146e39a2027784957b" ], "index": "pypi", - "version": "==2.19.1" + "version": "==2.21.0" }, "scikit-learn": { "hashes": [ - "sha256:1ca280bbdeb0f9950f9427c71e29d9f14e63b2ffa3e8fdf95f25e13773e6d898", - 
"sha256:33ad23aa0928c64567a24aac771aea4e179fab2a20f9f786ab00ca9fe0a13c82", - "sha256:344bc433ccbfbadcac8c16b4cec9d7c4722bcea9ce19f6da42e2c2f805571941", - "sha256:35ee532b5e992a6e8d8a71d325fd9e0b58716894657e7d3da3e7a1d888c2e7d4", - "sha256:37cbbba2d2a3895bba834d50488d22268a511279e053135bb291f637fe30512b", - "sha256:40cf1908ee712545f4286cc21f3ee21f3466c81438320204725ab37c96849f27", - "sha256:4130760ac54f5946523c1a1fb32a6c0925e5245f77285270a8f6fb5901b7b733", - "sha256:46cc8c32496f02affde7abe507af99cd752de0e41aec951a0bc40c693c2a1e07", - "sha256:4a364cf22be381a17c05ada9f9ce102733a0f75893c51b83718cd9358444921e", - "sha256:56aff3fa3417cd69807c1c74db69aee34ce08d7161cbdfebbff9b4023d9d224b", - "sha256:58debb34a15cfc03f4876e450068dbd711d9ec36ae5503ed2868f2c1f88522f7", - "sha256:7bcf7ade62ef3443470af32afb82646640d653f42502cf31a13cc17d3ff85d57", - "sha256:7d4eab203ed260075f47e2bf6a2bd656367e4e8683b3ad46d4651070c5d1e9aa", - "sha256:86697c6e4c2d74fbbf110c6d5979d34196a55108fa9896bf424f9795a8d935ad", - "sha256:911115db6669c9b11efd502dcc5483cd0c53e4e3c4bcdfe2e73bbb27eb5e81da", - "sha256:97d1d971f8ec257011e64b7d655df68081dd3097322690afa1a71a1d755f8c18", - "sha256:99f22c3228ec9ab3933597825dc7d595b6c8c7b9ae725cfa557f16353fac8314", - "sha256:a2e18e5a4095b3ca4852eb087d28335f3bb8515df4ccf906d380ee627613837f", - "sha256:a3070f71a4479a9827148609f24f2978f10acffa3b8012fe9606720d271066bd", - "sha256:a6a197499429d2eaa2ae922760aa3966ef353545422d5f47ea2ca9369cbf7d26", - "sha256:a7f6f5b3bc7b8e2066076098788579af12bd507ccea8ca6859e52761aa61eaca", - "sha256:a82b90b6037fcc6b311431395c11b02555a3fbf96921a0667c8f8b0c495991cb", - "sha256:ab2c4266b8cd159a266eb03c709ad5400756dca9c45aa48fb523263344475093", - "sha256:b983a2dfdb9d707c78790608bcfd63692e5c2d996865a9689f3db768d0a2978d", - "sha256:bb33d447f4c6fb164d426467d7bf8a4901c303333c5809b85319b2e0626763cd", - "sha256:bc2a0116a67081167f1fbfed731d361671e5925db291b70e65fa66170045c53f", - "sha256:bd189f6d0c2fdccb7c0d3fd1227c6626dc17d00257edbb63dd7c88f31928db61", 
- "sha256:d393f810da9cd4746cad7350fb89f0509c3ae702c79d2ba8bd875201be4102d1" - ], - "markers": "python_version != '3.2.*' and python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.1.*' and python_version != '3.0.*'", - "version": "==0.20.0" + "sha256:05d061606657af85365b5f71484e3362d924429edde17a90068960843ad597f5", + "sha256:071317afbb5c67fa493635376ddd724b414290255cbf6947c1155846956e93f7", + "sha256:0d03aaf19a25e59edac3099cda6879ba05129f0fa1e152e23b728ccd36104f57", + "sha256:1665ea0d4b75ef24f5f2a9d1527b7296eeabcbe3a1329791c954541e2ebde5a2", + "sha256:24eccb0ff31f84e88e00936c09197735ef1dcabd370aacb10e55dbc8ee464a78", + "sha256:27b48cabacce677a205e6bcda1f32bdc968fbf40cd2aa0a4f52852f6997fce51", + "sha256:2c51826b9daa87d7d356bebd39f8665f7c32e90e3b21cbe853d6c7f0d6b0d23b", + "sha256:3116299d392bd1d054655fa2a740e7854de87f1d573fa85503e64494e52ac795", + "sha256:3771861abe1fd1b2bbeaec7ba8cfca58fdedd75d790f099960e5332af9d1ff7a", + "sha256:473ba7d9a5eaec47909ee83d74b4a3be47a44505c5189d2cab67c0418cd030f1", + "sha256:621e2c91f9afde06e9295d128cb15cb6fc77dc00719393e9ec9d47119895b0d4", + "sha256:645865462c383e5faad473b93145a8aee97d839c9ad1fd7a17ae54ec8256d42b", + "sha256:80e2276d4869d302e84b7c03b5bac4a67f6cd331162e62ae775a3e5855441a60", + "sha256:84d2cfe0dee3c22b26364266d69850e0eb406d99714045929875032f91d3c918", + "sha256:87ea9ace7fe811638dfc39b850b60887509b8bfc93c4006d5552fa066d04ddc7", + "sha256:a4d1e535c75881f668010e6e53dfeb89dd50db85b05c5c45af1991c8b832d757", + "sha256:a4f14c4327d2e44567bfb3a0bee8c55470f820bc9a67af3faf200abd8ed79bf2", + "sha256:a7b3c24e193e8c6eaeac075b5d0bb0a7fea478aa2e4b991f6a7b030fc4fd410d", + "sha256:ab2919aca84f1ac6ef60a482148eec0944364ab1832e63f28679b16f9ef279c8", + "sha256:b0f79d5ff74f3c68a4198ad5b4dfa891326b5ce272dd064d11d572b25aae5b43", + "sha256:bc5bc7c7ee2572a1edcb51698a6caf11fae554194aaab9a38105d9ec419f29e6", + "sha256:bc5c750d548795def79576533f8f0f065915f17f48d6e443afce2a111f713747", + 
"sha256:c68969c30b3b2c1fe07c1376110928eade61da4fc29c24c9f1a89435a7d08abe", + "sha256:d3b4f791d2645fe936579d61f1ff9b5dcf0c8f50db7f0245ca8f16407d7a5a46", + "sha256:dac0cd9fdd8ac6dd6108a10558e2e0ca1b411b8ea0a3165641f9ab0b4322df4e", + "sha256:eb7ddbdf33eb822fdc916819b0ab7009d954eb43c3a78e7dd2ec5455e074922a", + "sha256:ed537844348402ed53420187b3a6948c576986d0b2811a987a49613b6a26f29e", + "sha256:fcca54733e692fe03b8584f7d4b9344f4b6e3a74f5b326c6e5f5e9d2504bdce7" + ], + "version": "==0.20.2" }, "scipy": { "hashes": [ - "sha256:0611ee97296265af4a21164a5323f8c1b4e8e15c582d3dfa7610825900136bb7", - "sha256:08237eda23fd8e4e54838258b124f1cd141379a5f281b0a234ca99b38918c07a", - "sha256:0e645dbfc03f279e1946cf07c9c754c2a1859cb4a41c5f70b25f6b3a586b6dbd", - "sha256:0e9bb7efe5f051ea7212555b290e784b82f21ffd0f655405ac4f87e288b730b3", - "sha256:108c16640849e5827e7d51023efb3bd79244098c3f21e4897a1007720cb7ce37", - "sha256:340ef70f5b0f4e2b4b43c8c8061165911bc6b2ad16f8de85d9774545e2c47463", - "sha256:3ad73dfc6f82e494195144bd3a129c7241e761179b7cb5c07b9a0ede99c686f3", - "sha256:3b243c77a822cd034dad53058d7c2abf80062aa6f4a32e9799c95d6391558631", - "sha256:404a00314e85eca9d46b80929571b938e97a143b4f2ddc2b2b3c91a4c4ead9c5", - "sha256:423b3ff76957d29d1cce1bc0d62ebaf9a3fdfaf62344e3fdec14619bb7b5ad3a", - "sha256:42d9149a2fff7affdd352d157fa5717033767857c11bd55aa4a519a44343dfef", - "sha256:625f25a6b7d795e8830cb70439453c9f163e6870e710ec99eba5722775b318f3", - "sha256:698c6409da58686f2df3d6f815491fd5b4c2de6817a45379517c92366eea208f", - "sha256:729f8f8363d32cebcb946de278324ab43d28096f36593be6281ca1ee86ce6559", - "sha256:8190770146a4c8ed5d330d5b5ad1c76251c63349d25c96b3094875b930c44692", - "sha256:878352408424dffaa695ffedf2f9f92844e116686923ed9aa8626fc30d32cfd1", - "sha256:8b984f0821577d889f3c7ca8445564175fb4ac7c7f9659b7c60bef95b2b70e76", - "sha256:8f841bbc21d3dad2111a94c490fb0a591b8612ffea86b8e5571746ae76a3deac", - "sha256:c22b27371b3866c92796e5d7907e914f0e58a36d3222c5d436ddd3f0e354227a", - 
"sha256:d0cdd5658b49a722783b8b4f61a6f1f9c75042d0e29a30ccb6cacc9b25f6d9e2", - "sha256:d40dc7f494b06dcee0d303e51a00451b2da6119acbeaccf8369f2d29e28917ac", - "sha256:d8491d4784aceb1f100ddb8e31239c54e4afab8d607928a9f7ef2469ec35ae01", - "sha256:dfc5080c38dde3f43d8fbb9c0539a7839683475226cf83e4b24363b227dfe552", - "sha256:e24e22c8d98d3c704bb3410bce9b69e122a8de487ad3dbfe9985d154e5c03a40", - "sha256:e7a01e53163818d56eabddcafdc2090e9daba178aad05516b20c6591c4811020", - "sha256:ee677635393414930541a096fc8e61634304bb0153e4e02b75685b11eba14cae", - "sha256:f0521af1b722265d824d6ad055acfe9bd3341765735c44b5a4d0069e189a0f40", - "sha256:f25c281f12c0da726c6ed00535ca5d1622ec755c30a3f8eafef26cf43fede694" + "sha256:014cb900c003b5ac81a53f2403294e8ecf37aedc315b59a6b9370dce0aa7627a", + "sha256:281a34da34a5e0de42d26aed692ab710141cad9d5d218b20643a9cb538ace976", + "sha256:588f9cc4bfab04c45fbd19c1354b5ade377a8124d6151d511c83730a9b6b2338", + "sha256:5a10661accd36b6e2e8855addcf3d675d6222006a15795420a39c040362def66", + "sha256:628f60be272512ca1123524969649a8cb5ae8b31cca349f7c6f8903daf9034d7", + "sha256:6dcc43a88e25b815c2dea1c6fac7339779fc988f5df8396e1de01610604a7c38", + "sha256:70e37cec0ac0fe95c85b74ca4e0620169590fd5d3f44765f3c3a532cedb0e5fd", + "sha256:7274735fb6fb5d67d3789ddec2cd53ed6362539b41aa6cc0d33a06c003aaa390", + "sha256:78e12972e144da47326958ac40c2bd1c1cca908edc8b01c26a36f9ffd3dce466", + "sha256:790cbd3c8d09f3a6d9c47c4558841e25bac34eb7a0864a9def8f26be0b8706af", + "sha256:79792c8fe8e9d06ebc50fe23266522c8c89f20aa94ac8e80472917ecdce1e5ba", + "sha256:865afedf35aaef6df6344bee0de391ee5e99d6e802950a237f9fb9b13e441f91", + "sha256:870fd401ec7b64a895cff8e206ee16569158db00254b2f7157b4c9a5db72c722", + "sha256:963815c226b29b0176d5e3d37fc9de46e2778ce4636a5a7af11a48122ef2577c", + "sha256:9726791484f08e394af0b59eb80489ad94d0a53bbb58ab1837dcad4d58489863", + "sha256:9de84a71bb7979aa8c089c4fb0ea0e2ed3917df3fb2a287a41aaea54bbad7f5d", + "sha256:b2c324ddc5d6dbd3f13680ad16a29425841876a84a1de23a984236d1afff4fa6", 
+ "sha256:b86ae13c597fca087cb8c193870507c8916cefb21e52e1897da320b5a35075e5", + "sha256:ba0488d4dbba2af5bf9596b849873102d612e49a118c512d9d302ceafa36e01a", + "sha256:d78702af4102a3a4e23bb7372cec283e78f32f5573d92091aa6aaba870370fe1", + "sha256:def0e5d681dd3eb562b059d355ae8bebe27f5cc455ab7c2b6655586b63d3a8ea", + "sha256:e085d1babcb419bbe58e2e805ac61924dac4ca45a07c9fa081144739e500aa3c", + "sha256:e2cfcbab37c082a5087aba5ff00209999053260441caadd4f0e8f4c2d6b72088", + "sha256:e742f1f5dcaf222e8471c37ee3d1fd561568a16bb52e031c25674ff1cf9702d5", + "sha256:f06819b028b8ef9010281e74c59cb35483933583043091ed6b261bb1540f11cc", + "sha256:f15f2d60a11c306de7700ee9f65df7e9e463848dbea9c8051e293b704038da60", + "sha256:f31338ee269d201abe76083a990905473987371ff6f3fdb76a3f9073a361cf37", + "sha256:f6b88c8d302c3dac8dff7766955e38d670c82e0d79edfc7eae47d6bb2c186594" ], "index": "pypi", - "version": "==1.1.0" + "version": "==1.2.1" }, - "simplegeneric": { + "singledispatch": { "hashes": [ - "sha256:dc972e06094b9af5b855b3df4a646395e43d1c9d0d39ed345b7393560d0b9173" + "sha256:5b06af87df13818d14f08a028e42f566640aef80805c3b50c5056b086e3c2b9c", + "sha256:833b46966687b3de7f438c761ac475213e53b306740f1abfaa86e1d1aae56aa8" ], - "version": "==0.8.1" + "version": "==3.4.0.3" }, "six": { "hashes": [ - "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", - "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb" + "sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c", + "sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73" ], - "version": "==1.11.0" + "version": "==1.12.0" }, "sklearn": { "hashes": [ @@ -523,23 +673,44 @@ }, "smmap2": { "hashes": [ - "sha256:0dd53d991af487f9b22774fa89451358da3607c02b9b886a54736c6a313ece0b", - "sha256:dc216005e529d57007ace27048eb336dcecb7fc413cfb3b2f402bb25972b69c6" + "sha256:0555a7bf4df71d1ef4218e4807bbf9b201f910174e6e08af2e138d4e517b4dde", + 
"sha256:29a9ffa0497e7f2be94ca0ed1ca1aa3cd4cf25a1f6b4f5f87f74b46ed91d609a" ], - "version": "==2.0.4" + "version": "==2.0.5" + }, + "snowballstemmer": { + "hashes": [ + "sha256:919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128", + "sha256:9f3bcd3c401c3e862ec0ebe6d2c069ebc012ce142cce209c098ccb5b09136e89" + ], + "version": "==1.2.1" + }, + "sphinx": { + "hashes": [ + "sha256:b53904fa7cb4b06a39409a492b949193a1b68cc7241a1a8ce9974f86f0d24287", + "sha256:c1c00fc4f6e8b101a0d037065043460dffc2d507257f2f11acaed71fd2b0c83c" + ], + "index": "pypi", + "version": "==1.8.4" + }, + "sphinxcontrib-websupport": { + "hashes": [ + "sha256:68ca7ff70785cbe1e7bccc71a48b5b6d965d79ca50629606c7861a21b206d9dd", + "sha256:9de47f375baf1ea07cdb3436ff39d7a9c76042c10a769c52353ec46e4e8fc3b9" + ], + "version": "==1.1.0" }, "tornado": { "hashes": [ - "sha256:0662d28b1ca9f67108c7e3b77afabfb9c7e87bde174fbda78186ecedc2499a9d", - "sha256:4e5158d97583502a7e2739951553cbd88a72076f152b4b11b64b9a10c4c49409", - "sha256:732e836008c708de2e89a31cb2fa6c0e5a70cb60492bee6f1ea1047500feaf7f", - "sha256:8154ec22c450df4e06b35f131adc4f2f3a12ec85981a203301d310abf580500f", - "sha256:8e9d728c4579682e837c92fdd98036bd5cdefa1da2aaf6acf26947e6dd0c01c5", - "sha256:d4b3e5329f572f055b587efc57d29bd051589fb5a43ec8898c77a47ec2fa2bbb", - "sha256:e5f2585afccbff22390cddac29849df463b252b711aa2ce7c5f3f342a5b3b444" + "sha256:3f8db1394416371fb3a7a56062c77366cf10f8c4d81626df0135a2b2a7e26d2f", + "sha256:434a2821caa09cf96ffccbfaf101b8c27c4f9eee8f9e5ba933c6c4dc4c7eabbe", + "sha256:50e3fe9265938d36dd1744e395669555caf161ca5bf46c56d612866cbcda4869", + "sha256:5e9565c293e904c5642752b2ae0cefe932cd201e82a81cd6ee3480b0448d0250", + "sha256:981dfdac0308eca069447e4b4fbb029cc91627f58dd7a5f35a97d6245ab5824d", + "sha256:d675dd93cdad3545e619ce609451a7f77851bd5c3f34c58a1e01f5fd119f40f9", + "sha256:e90c5fa57a84b1cbd36c656a35a3dd07d130bcca8a2b5ee8cb0f16479bb6e4e3" ], - "markers": "python_version != '3.2.*' and python_version >= '2.7' and 
python_version != '3.3.*' and python_version != '3.1.*' and python_version != '3.0.*'", - "version": "==5.1.1" + "version": "==6.0" }, "traitlets": { "hashes": [ @@ -550,11 +721,10 @@ }, "urllib3": { "hashes": [ - "sha256:a68ac5e15e76e7e5dd2b8f94007233e01effe3e50e8daddf69acfd81cb686baf", - "sha256:b5725a0bd4ba422ab0e66e89e030c806576753ea3ee08554382c14e685d117b5" + "sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39", + "sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22" ], - "markers": "python_version != '3.2.*' and python_version >= '2.6' and python_version != '3.3.*' and python_version < '4' and python_version != '3.1.*' and python_version != '3.0.*'", - "version": "==1.23" + "version": "==1.24.1" }, "wcwidth": { "hashes": [ diff --git a/README.md b/README.md index 356a07f37d7..9664c714be2 100644 --- a/README.md +++ b/README.md @@ -36,23 +36,12 @@ sudo apt-get install git -y git --version ``` -4. Apply a patch to gitpython - -(Try to) apply a patch to gitpython 2.1.x: - -```bash -pipenv shell -cd misc/ -./apply_patch.py -exit -``` - -5. Add project directory to path +4. Add project directory to path Add the following line to your `~/.bashrc` file. ``` -export PATH=$PATH:/path/to/dir +export PYTHONPATH=$PYTHONPATH:/path/to/dir ``` To update your path for the remainder of the session. @@ -60,7 +49,7 @@ To update your path for the remainder of the session. source ~/.bashrc ``` -6. Install srcML for parsing C/C++ and Java +5. Install srcML for parsing C/C++ and Java Please download from [here](https://www.srcml.org/#download) and follow the [instructions](http://131.123.42.38/lmcrs/beta/README). @@ -71,7 +60,7 @@ sudo apt install libarchive-dev sudo apt install libcurl4-openssl-dev ``` -7. Check setup correctness +6. Check setup correctness ```bash pipenv run pytest test/test_analytics @@ -79,6 +68,22 @@ pipenv run pytest test/test_analytics You should see all tests passed. 
+## Report Test Coverage + +We use [coverage.py](https://coverage.readthedocs.io/) and [pytest-cov](https://pytest-cov.readthedocs.io/en/latest/) to compute test coverage: + +``` +# Execution +pytest --cov=persper/ test/test_analytics + +# Reporting +coverage html + +# then visit htmlcov/index.html in your browser +``` + + + ## Interactive mode with jupyter notebook 1. Install Jupyter diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000000..298ea9e213e --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,19 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 00000000000..e2996fad074 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +import os +import sys +sys.path.insert(0, os.path.abspath('..')) + + +# -- Project information ----------------------------------------------------- + +project = 'Persper Code Analytics' +copyright = '2019, Persper Foundation' +author = 'Persper Foundation' + +# The short X.Y version +version = '' +# The full version, including alpha/beta/rc tags +release = '' + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.napoleon', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = None + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+# +html_theme = 'alabaster' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'PersperCodeAnalyticsdoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'PersperCodeAnalytics.tex', 'Persper Code Analytics Documentation', + 'Persper Foundation', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [ + (master_doc, 'perspercodeanalytics', 'Persper Code Analytics Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'PersperCodeAnalytics', 'Persper Code Analytics Documentation', + author, 'PersperCodeAnalytics', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + + +# -- Extension configuration ------------------------------------------------- diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 00000000000..1802d695d21 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,18 @@ +Welcome to Persper Code Analytics's documentation! +================================================== + +.. automodule:: persper.analytics.call_commit_graph + :members: + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 00000000000..7893348a1b7 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/notebooks/lsp-ccls-ccls.ipynb b/notebooks/lsp-ccls-ccls.ipynb new file mode 100644 index 00000000000..28eeeae008b --- /dev/null +++ b/notebooks/lsp-ccls-ccls.ipynb @@ -0,0 +1,1892 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "\n", + "logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(name)s] %(message)s',\n", + " level=logging.CRITICAL)\n", + "logging.getLogger(\"persper.analytics.lsp_graph_server.callgraph.manager\").setLevel(logging.INFO)\n", + "logging.getLogger('asyncio').setLevel(logging.WARNING)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Workspace root: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmpd69om9e8\n", + "----- Overview ------\n", + "# of commits on master: 108\n", + "# of commits on branch: 0\n", + "----- No.1 5dbde940b60cab1d6a4cd45a90aa45959f7ad84b on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:49:38,759 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 687 branches from 31 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.2 0dc27bd3acfc0526ac713d15d32c8d784297b6a9 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:49:59,630 INFO 
[persper.analytics.lsp_graph_server.callgraph.manager] Added 1516 branches from 28 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.3 0718435400ed2a4ff1ae958d565dc6de5b695ab4 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:50:21,756 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1139 branches from 38 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.4 46cec6f08355cb9a985f8fce657c35fbd09a4f9a on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:50:39,501 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 886 branches from 39 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.5 a8cdadc201620993ece0be8d8e2b3d200448701b on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:50:59,757 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1086 branches from 38 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.6 e54d70e464e93daa3058e6a34584f1d073609efa on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:51:17,096 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1051 branches from 22 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.7 41f2a75de02707b2972a654b48c86955b60d9385 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:51:35,236 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1128 branches from 25 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.8 fd1d8c8785a942b3efcf4e88a4ad771c344fa347 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": 
"stream", + "text": [ + "2019-01-17 22:51:50,596 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1102 branches from 13 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.9 7f6354f9c8810321337ef705b132684497f45fa5 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:52:02,589 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 798 branches from 4 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.10 ca9918b8c4284d432cbf224c6dcac7dbe170ce7b on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:52:17,925 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1091 branches from 15 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.11 4ed00a32622b64df920c342251a417bc8ebd27e9 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:52:33,381 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1150 branches from 4 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.12 e25d54c8b941d1074383333b2542f4740e668eae on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:52:50,679 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1322 branches from 20 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.13 e637de145af8aff3cbb0f0761928c6496eb9bed2 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:53:03,821 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 957 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.14 161aab3a09ba7b3cf30d540d7ed12676750e732e on main -----\n" + ] 
+ }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:53:17,070 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 935 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.15 8b534175f5b7368d6bf8a1a0a3cdd98b6aacef89 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:53:31,372 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1041 branches from 6 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.16 d87d74083caa18aa990f3f9e0ca60a3d72f45067 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:54:01,717 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1926 branches from 32 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.17 7701822aa9cfcfd6f82db2b8652f24f3c477cc66 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:54:28,115 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1609 branches from 13 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.18 3cd39ae91ab53587af59abefd0431a49dd2d008b on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:54:51,311 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1355 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.19 d3b57ac30108251a3334058b1b9c4dc0a7bd894b on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:55:19,094 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1744 branches from 22 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.20 
d01eb6b86f22e7aad01cdcc53f3c917e26329156 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:55:43,285 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1414 branches from 5 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.21 d26cb81854526c1ddaf95d80ed5b8c984851e9be on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:56:07,344 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1358 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.22 6970d60dca9454227a3d85006193d625f63e0603 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:56:29,110 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1157 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.23 da6fbf7c5a750036a2c5013a6edadc638d789aca on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:56:29,402 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 70 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.24 657260eeabf7b933dce952ef31c62a95c933d883 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:56:52,954 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1177 branches from 5 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.25 0dabbb30932ea04a718d719ae3b66822a84d9647 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:57:24,481 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 22:57:27,024 INFO 
[persper.analytics.lsp_graph_server.callgraph.manager] Added 2654 branches from 49 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.26 43072452e5666869777cddda80b4a36e5f373c86 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:57:49,788 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1274 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.27 e78945a80f1908cf5c759f47bbc35e9ecaccf244 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:58:14,737 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1314 branches from 6 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.28 04412f056f6b7f0c19585a14f58c4a4a699a6d21 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:58:40,763 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1410 branches from 5 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.29 561f747133c0f045da56076f1b60680508580cf9 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:58:58,677 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1081 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.30 f3edc6e2f010cf0eabe5605549b8e33e22891af4 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:59:13,770 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 945 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.31 ab7138bd915c9d0b0ea2585e6d8cafaa8990d30c on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": 
"stream", + "text": [ + "2019-01-17 22:59:29,088 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 862 branches from 5 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.32 7c70d7fafde186f9defd58590bee0b3a7a96124b on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 22:59:59,882 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:00:03,505 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 3015 branches from 72 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.33 f067f6dcc428ca53d4612b7661443d27a6a7f83a on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:00:22,231 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1133 branches from 4 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.34 ef1ff80a58232aa0efe3d1c7e32240bf7b9a29a3 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:00:52,943 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:00:55,921 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2950 branches from 71 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.35 6b95f51a25849122a7e0093ea2e243c9b7b64cfb on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:01:17,915 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1320 branches from 11 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.36 5f0f290d1876ff13259709f62bcbeff57ea1334c on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:01:37,582 
INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 991 branches from 7 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.37 a7f5231e4cc4bb8ea676ef93851b9edaeeb13123 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:02:10,127 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:02:13,838 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 3038 branches from 77 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.38 6a95d2f46f382a460ece15c976926967247d4ebb on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:02:41,859 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:02:43,349 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2530 branches from 47 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.39 2932b5d41b531fd51b0758b0239fecda2d286790 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:03:02,148 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 976 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.40 b33bd54922f74a92ca0d8f82301ffbeae3d95c33 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:03:21,097 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 979 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.41 bebb2306038702e0ee600926e9f4ddff66a9ce17 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:03:39,146 INFO 
[persper.analytics.lsp_graph_server.callgraph.manager] Added 981 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.42 9b351ce52faa76aab2564e9fba5f9fd3394344f6 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:03:57,577 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1014 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.43 683cac602e7a6952abad2c280f7662edd3d7e268 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:04:31,849 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1961 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.44 358b4434c2dddaaccfff8400ce0c5497629b605a on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:05:02,159 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:05:03,906 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2249 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.45 ce4c2232d7599403388a945a53968a8ff5c2cb4c on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:05:29,698 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1549 branches from 17 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.46 d867d962d82abae8b40ed5e433a02646a5aacbe0 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:05:49,879 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1231 branches from 6 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- 
No.47 b165cfa59d03fec0776b6b0b28f4065a587086a0 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:05:55,146 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 305 branches from 5 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.48 0f17a49d40aacaf29a6d7eaefe3762072c0b30f7 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:06:05,985 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 796 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.49 264b687e4312fa022ebca264945af344c12b78ef on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:06:08,113 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 104 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.50 5faf9d1f6b54d89113eb0b0aac5ccb798e630fe8 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:06:20,942 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 788 branches from 4 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.51 d59b7c7379c4fa2653b388dcd988d58c439f2e9b on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:06:39,811 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1302 branches from 6 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.52 f3f72a0dfabb6ac2dead437e829960d2910c65a0 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:07:03,955 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:07:05,376 INFO 
[persper.analytics.lsp_graph_server.callgraph.manager] Added 2405 branches from 29 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.53 b7d9a0f815f825d0a4e6993395b15e9a0fba51c7 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:07:31,312 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:07:33,346 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2274 branches from 9 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.54 243ed8dfa523092e77ee8c6341ffffcd623fe93d on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:07:57,378 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:07:58,810 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2195 branches from 5 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.55 f3aa91d8dbf457e09897b52e70d3a80fa4c305f5 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:08:10,003 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1042 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.56 aaa3542670a9e4ada6973d3077df8d73d9e0ab40 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:08:34,505 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:08:41,626 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2727 branches from 5 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.57 bc8daee0654c4daa1ebc2b29333248678f34f70a on main -----\n", + "----- No.58 
18aa28bdea347d0af19504a08e71cc42d1fcdfeb on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:09:07,540 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:09:11,405 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2526 branches from 4 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.59 aa677de886e23ebff3e4d6327f98730f5bed9cd2 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:09:31,911 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:09:38,203 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2646 branches from 5 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.60 430b9ef61bc4a106dba0e6b685b1891d351f17f3 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:09:58,608 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:10:00,234 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2191 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.61 6df9f61b56a0e3fe867f9af93530a6ef4eed55c4 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:10:15,581 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1310 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.62 c1db1766c9139ffb09525bbebc8d98a1500bdbef on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:10:21,608 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 879 branches from 1 files.\n" + ] + }, + { + "name": 
"stdout", + "output_type": "stream", + "text": [ + "----- No.63 7192db6e32e8ea7828ef52a50ef3d66761ac535a on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:10:35,476 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1293 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.64 a37d402ce2bcb5155fae5d1dd558b6a2d84ce4c2 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:10:37,707 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 235 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.65 05af433b5a323f0cd626aa810738f0326595005f on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:10:58,201 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:11:03,455 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2464 branches from 6 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.66 1f4f72013654f3b03e19f95e1612281527386256 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:11:20,372 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1199 branches from 7 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.67 2fc419faa302d51101c8b71b5ecdd1c8c2de7f89 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:11:37,984 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1290 branches from 6 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.68 0738b8f57aa4abfcd43b517b35338a51db21d75d on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": 
"stream", + "text": [ + "2019-01-17 23:11:53,919 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 953 branches from 7 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.69 88f2a3541a0bcb551da8c00cf80bba29f3245467 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:12:03,207 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 375 branches from 6 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.70 dad7fcb5a3a5535744f9a52e9cce9a482ad9669e on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:12:10,976 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 536 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.71 6ffca03d6f7baf280996e02f8b78f9a886e3f80e on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:12:20,117 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 873 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.72 94383d589b3fcb5786290c50a9f2e08402e0e562 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:12:26,529 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 388 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.73 b3a544e880547a9434c3e7caad526759dc6bdef0 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:12:34,166 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 448 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.74 253414f29b9c1d3e0dc06c807b79591fe6daad85 on main -----\n" + ] + }, 
+ { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:12:41,827 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 452 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.75 dd8fb070fd5b1bae7f7e1d7dd03131e208e107e5 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:12:47,812 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 319 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.76 15b5a03a08799659d54b58bf0f7b49d3ae5ed55b on main -----\n", + "----- No.77 2a08552265402719f5bcc8c260794b16734adc2d on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:13:11,934 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1650 branches from 7 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.78 8fb0fb816c45ea7fad3ac4732b72d42401d184b4 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:13:40,596 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1889 branches from 7 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.79 bf98dc56fb15f4d47c6f9faf245dc1d84b5907a2 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:14:17,629 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:14:37,878 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 3634 branches from 14 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.80 d403d7ad963898cfb1bd9d46220ab57359c3658e on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:14:38,331 INFO 
[persper.analytics.lsp_graph_server.callgraph.manager] Added 9 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.81 8e3615240661365c9f6650db08246ef6dd3886ae on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:15:08,228 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1691 branches from 19 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.82 e06f9472c1012235de02caf6e2d5b41688b72c85 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:15:29,193 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1178 branches from 7 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.83 f6967eee48f48c2b100538b40a58e3cb53829d3e on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:15:45,933 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 974 branches from 6 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.84 f7e2b200591372495d93b0e56d00bce3eafe89c9 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:16:01,735 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 876 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.85 49c6f7787a4363d4a43b1197d9a66d995da2b7d0 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:16:18,063 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 898 branches from 4 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.86 36f69e61a9b248097f7f4330d69568d54129a7db on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + 
"text": [ + "2019-01-17 23:16:40,232 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1266 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.87 852a4218ba60770e6effa149a3ccfecd997bd2f0 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:16:47,947 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 454 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.88 2db753ff92279f7e5a1901e9e5bc27a8d6d5eb29 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:17:02,149 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 834 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.89 7a160a526975068509461803ee99db8a2d2a4c4a on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:17:31,047 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:17:34,265 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2432 branches from 4 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.90 729264bb34a2e885409a0d2c19170f72d809c327 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:18:14,687 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:18:23,985 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2761 branches from 9 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.91 b1a69c2ec3f96d668b4e815adf83641289444018 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:18:30,312 INFO 
[persper.analytics.lsp_graph_server.callgraph.manager] Added 424 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.92 94b344dbe3f79340e5d18ed7ddc3bed6fcab8a15 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:18:43,079 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1168 branches from 42 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.93 6519dc1b8de9d3a4fa53995753ca8b6fe7a1ef86 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:18:55,784 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1543 branches from 53 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.94 4e2f24ac174533d295799a6a4958da6a564f7462 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:19:05,209 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 771 branches from 15 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.95 9b89bfc905dd90205a6c3a901bdf5f1a35e535ba on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:19:18,287 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 765 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.96 afc6db80ddebda8813015a645854de6422eef32e on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:19:30,279 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 820 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.97 1508ac85d8cace7bdf9fdf456753a6024959008b on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": 
"stream", + "text": [ + "2019-01-17 23:20:04,554 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:20:13,549 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2624 branches from 7 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.98 7c55502fe8c4cadd24b507fe1053165f4b4c55fb on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:20:15,408 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 79 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.99 68d9002ecd9ac617448d25e0beeb1593c7e2df72 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:20:17,041 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 54 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.100 38fb4a4f1da53062cbaa881534f612cd78f8014b on main -----\n", + "------ Used time: 1848.181 -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:20:19,292 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 73 branches from 3 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.101 8a13acd3b867dc3cf4455f3387123ea3df89725b on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:20:20,304 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 3 branches from 2 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.102 f3c8500fa9c7422ffd693c02a2bb7c9fb01b3bc5 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:20:47,474 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1552 branches from 5 
files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.103 cc4d49794d8960b82208bfa2ec8e4f4a9f9548c9 on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:21:24,708 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Already added 2000 branches.\n", + "2019-01-17 23:21:31,032 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 2806 branches from 8 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.104 fdd798f995ce3f6e66b1c3d92ad6b9dedc3218ab on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:21:56,286 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1876 branches from 4 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.105 1a6fd858c67110e9d88a1012db973875cccd8d2f on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:22:02,520 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 362 branches from 4 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.106 0b44f72ed8c0091862a1f9f0f782a5180d0a82ff on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:22:13,815 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 898 branches from 4 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.107 c079ab45b3bdda145054e14aeb61c332c2dbf3cf on main -----\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:22:17,001 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 299 branches from 1 files.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----- No.108 ebd467d31b10dbf5784f160533d747d434bbc764 on main -----\n" + ] + }, + { + 
"name": "stderr", + "output_type": "stream", + "text": [ + "2019-01-17 23:22:42,581 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 1538 branches from 4 files.\n" + ] + } + ], + "source": [ + "from persper.analytics.analyzer import Analyzer\n", + "from persper.analytics.lsp_graph_server.ccls import CclsGraphServer\n", + "from tempfile import mkdtemp\n", + "\n", + "dumpLogs = True\n", + "workspaceRoot = mkdtemp()\n", + "print(\"Workspace root: \", workspaceRoot)\n", + "G = None\n", + "async with CclsGraphServer(workspaceRoot, cacheRoot=\"./.ccls-cache\",\n", + " languageServerCommand=\"../bin/ccls\" + (\" -log-file=ccls.log\" if dumpLogs else \"\"),\n", + " dumpLogs=dumpLogs) as graphServer:\n", + " analyzer = Analyzer(\"../../../testrepos/ccls\", graphServer)\n", + " graphServer.reset_graph()\n", + " await analyzer.analyze()\n", + " G = analyzer.get_graph()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Nodes: 861\n", + "Edges: 3388\n" + ] + } + ], + "source": [ + "print(\"Nodes:\", len(G.nodes()))\n", + "print(\"Edges:\", len(G.edges()))" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "namespace clang {} | namespace clang {}\n", + "namespace clang {} | clang::SourceLocation::SourceLocation::SourceLocation(int &tu, const std::string &filepath, unsigned int offset)\n", + "namespace clang {} | clang::SourceLocation::SourceLocation::SourceLocation(int &tu, const std::string &filepath, unsigned int line, unsigned int column)\n", + "namespace clang {} | std::string clang::SourceLocation::get_path()\n", + "namespace clang {} | clang::Offset clang::SourceLocation::get_offset()\n", + "namespace clang {} | void clang::SourceLocation::get_data(std::string *path, unsigned int *line, unsigned int *column, unsigned 
int *offset)\n", + "namespace clang {} | clang::Index::Index(int excludeDeclarationsFromPCH, int displayDiagnostics)\n", + "namespace clang {} | clang::Diagnostic::Diagnostic(int &cx_tu, int &cx_diagnostic)\n", + "namespace clang {} | static const std::string clang::Diagnostic::get_severity_spelling(unsigned int severity)\n", + "namespace clang {} | int clang::SourceRange::get_offsets()\n", + "namespace clang {} | int clang::CompileCommand::get_command()\n", + "namespace clang {} | int clang::CompileCommand::get_command_as_args()\n", + "namespace clang {} | clang::CompileCommands::CompileCommands(const int &filename, clang::CompilationDatabase &db)\n", + "namespace clang {} | int clang::CompileCommands::get_commands()\n", + "namespace clang {} | clang::CompletionString::CompletionString(const int &cx_completion_sting)\n", + "namespace clang {} | std::string clang::Cursor::get_usr() const\n", + "namespace clang {} | clang::CompilationDatabase::CompilationDatabase(const int &project_path)\n", + "namespace clang {} | clang::TranslationUnit::TranslationUnit(int &index, const std::string &file_path, const int &command_line_args, const std::string &buffer, unsigned int flags)\n", + "namespace clang {} | static unsigned int clang::TranslationUnit::DefaultFlags()\n", + "namespace clang {} | int clang::TranslationUnit::ReparseTranslationUnit(const std::string &buffer, unsigned int flags)\n", + "namespace clang {} | void clang::TranslationUnit::parse(int &index, const std::string &file_path, const int &command_line_args, const int &buffers, unsigned int flags)\n", + "namespace clang {} | int clang::TranslationUnit::get_code_completions(const std::string &buffer, unsigned int line_number, unsigned int column)\n", + "namespace clang {} | int clang::TranslationUnit::get_diagnostics()\n", + "namespace clang {} | int clang::TranslationUnit::get_tokens(unsigned int start_offset, unsigned int end_offset)\n", + "namespace clang {} | int clang::TranslationUnit::get_tokens(unsigned 
int start_line, unsigned int start_column, unsigned int end_line, unsigned int end_column)\n", + "namespace clang {} | int clang::TranslationUnit::get_cursor(std::string path, unsigned int offset)\n", + "namespace clang {} | int clang::TranslationUnit::get_cursor(std::string path, unsigned int line, unsigned int column)\n", + "namespace clang {} | int clang::Token::get_spelling() const\n", + "namespace clang {} | clang::Cursor::Cursor()\n", + "namespace clang {} | clang::Cursor::Cursor::Cursor(const int &other)\n", + "namespace clang {} | int Cursor::get_kind() const\n", + "namespace clang {} | clang::Type clang::Cursor::get_type() const\n", + "namespace clang {} | clang::SourceLocation clang::Cursor::get_source_location() const\n", + "namespace clang {} | int Cursor::get_source_range() const\n", + "namespace clang {} | std::string clang::Type::get_spelling() const\n", + "namespace clang {} | std::string clang::Cursor::get_display_name() const\n", + "namespace clang {} | bool clang::Cursor::is_definition() const\n", + "namespace clang {} | clang::Cursor clang::Cursor::get_referenced() const\n", + "namespace clang {} | clang::Cursor clang::Cursor::get_canonical() const\n", + "namespace clang {} | clang::Cursor clang::Cursor::get_definition() const\n", + "namespace clang {} | clang::Cursor clang::Cursor::get_semantic_parent() const\n", + "namespace clang {} | int Cursor::get_arguments() const\n", + "namespace clang {} | bool clang::Cursor::is_valid_kind() const\n", + "namespace clang {} | std::string clang::Cursor::get_type_description() const\n", + "namespace clang {} | std::string clang::Cursor::get_comments() const\n", + "namespace clang {} | clang::TranslationUnit::TranslationUnit::TranslationUnit(int &index, const std::string &file_path, const int &command_line_args, const std::string &buffer, unsigned int flags)\n", + "namespace clang {} | void TranslationUnit::parse(int &index, const std::string &file_path, const int &command_line_args, const int &buffers, 
unsigned int flags)\n", + "namespace clang {} | int TranslationUnit::get_code_completions(const std::string &buffer, unsigned int line_number, unsigned int column)\n", + "namespace clang {} | int TranslationUnit::get_diagnostics()\n", + "namespace clang {} | int TranslationUnit::get_tokens(unsigned int start_offset, unsigned int end_offset)\n", + "namespace clang {} | int TranslationUnit::get_tokens(unsigned int start_line, unsigned int start_column, unsigned int end_line, unsigned int end_column)\n", + "namespace clang {} | int TranslationUnit::document_cursor() const\n", + "namespace clang {} | int TranslationUnit::get_cursor(std::string path, unsigned int offset)\n", + "namespace clang {} | int TranslationUnit::get_cursor(std::string path, unsigned int line, unsigned int column)\n", + "namespace clang {} | int clang::CodeCompleteResults::get_usr() const\n", + "namespace clang {} | clang::Type::Type()\n", + "namespace clang {} | clang::Type clang::Type::get_return_type() const\n", + "namespace clang {} | int Type::get_arguments() const\n", + "namespace clang {} | std::string clang::Type::get_usr() const\n", + "namespace clang {} | std::string clang::Cursor::ToString() const\n", + "namespace clang {} | clang::SourceLocation::SourceLocation::SourceLocation(const int &cx_location)\n", + "namespace clang {} | std::string clang::SourceLocation::ToString() const\n", + "namespace clang {} | clang::Type::Type::Type(const int &other)\n", + "namespace clang {} | std::string clang::Cursor::evaluate() const\n", + "namespace clang {} | clang::Type clang::Type::strip_qualifiers() const\n", + "namespace clang {} | bool clang::Type::is_fundamental() const\n", + "namespace clang {} | int Type::get_template_arguments() const\n", + "namespace clang {} | clang::SourceLocation::SourceLocation()\n", + "namespace clang {} | class Foo\n", + "namespace clang {} | clang::Cursor clang::Cursor::template_specialization_to_template_definition() const\n", + "namespace clang {} | struct 
FindChildOfKindParam {}\n", + "namespace clang {} | clang::CompileCommand::CompileCommand::CompileCommand(const int &command)\n", + "namespace clang {} | int CompileCommand::get_command() const\n", + "namespace clang {} | int CompileCommand::get_command_as_args() const\n", + "namespace clang {} | clang::CompileCommands::CompileCommands(const clang::CompilationDatabase &db)\n", + "namespace clang {} | struct IndexParam {}\n", + "namespace clang {} | explicit clang::CompilationDatabase::CompilationDatabase(const std::string &project_path)\n", + "namespace clang {} | int Type::get_declaration() const\n", + "namespace clang {} | struct SymbolIdx {}\n", + "namespace clang {} | struct QueryableLocation {}\n", + "clang::SourceLocation::SourceLocation::SourceLocation(int &tu, const std::string &filepath, unsigned int offset) | namespace clang {}\n", + "clang::SourceLocation::SourceLocation::SourceLocation(int &tu, const std::string &filepath, unsigned int line, unsigned int column) | namespace clang {}\n", + "clang::SourceLocation::SourceLocation::SourceLocation(int &tu, const std::string &filepath, unsigned int line, unsigned int column) | clang::SourceLocation::SourceLocation::SourceLocation(int &tu, const std::string &filepath, unsigned int offset)\n", + "std::string clang::SourceLocation::get_path() | namespace clang {}\n", + "std::string clang::SourceLocation::get_path() | void clang::SourceLocation::get_data(std::string *path, unsigned int *line, unsigned int *column, unsigned int *offset)\n", + "void clang::SourceLocation::get_data(std::string *path, unsigned int *line, unsigned int *column, unsigned int *offset) | namespace clang {}\n", + "clang::Offset clang::SourceLocation::get_offset() | namespace clang {}\n", + "clang::Offset clang::SourceLocation::get_offset() | void clang::SourceLocation::get_data(std::string *path, unsigned int *line, unsigned int *column, unsigned int *offset)\n", + "int clang::CompileCommand::get_command() | namespace clang {}\n", + "int 
clang::CompileCommand::get_command_as_args() | namespace clang {}\n", + "int clang::CompileCommand::get_command_as_args() | int clang::CompileCommand::get_command()\n", + "clang::Diagnostic::Diagnostic(int &cx_tu, int &cx_diagnostic) | namespace clang {}\n", + "clang::Diagnostic::Diagnostic(int &cx_tu, int &cx_diagnostic) | static const std::string clang::Diagnostic::get_severity_spelling(unsigned int severity)\n", + "clang::Diagnostic::Diagnostic(int &cx_tu, int &cx_diagnostic) | clang::SourceLocation::SourceLocation::SourceLocation(int &tu, const std::string &filepath, unsigned int line, unsigned int column)\n", + "clang::Diagnostic::Diagnostic(int &cx_tu, int &cx_diagnostic) | std::string clang::SourceLocation::get_path()\n", + "clang::Diagnostic::Diagnostic(int &cx_tu, int &cx_diagnostic) | clang::Offset clang::SourceLocation::get_offset()\n", + "clang::Diagnostic::Diagnostic(int &cx_tu, int &cx_diagnostic) | int clang::SourceRange::get_offsets()\n", + "static const std::string clang::Diagnostic::get_severity_spelling(unsigned int severity) | namespace clang {}\n", + "clang::Index::Index(int excludeDeclarationsFromPCH, int displayDiagnostics) | namespace clang {}\n", + "clang::Index::~Index() noexcept | namespace clang {}\n", + "...\n" + ] + } + ], + "source": [ + "import itertools\n", + "for e in itertools.islice(G.edges(), 100):\n", + " print(e[0], \" | \", e[1])\n", + "if len(G.edges()) > 100:\n", + " print(\"...\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# un-pickle test\n", + "import pickle\n", + "loaded = pickle.load(open(\"ccls-finished-0.pickle\", \"rb\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "namespace clang {} | namespace clang {}\n", + "namespace clang {} | clang::SourceLocation::SourceLocation::SourceLocation(int &tu, const std::string &filepath, unsigned 
int offset)\n", + "namespace clang {} | clang::SourceLocation::SourceLocation::SourceLocation(int &tu, const std::string &filepath, unsigned int line, unsigned int column)\n", + "namespace clang {} | std::string clang::SourceLocation::get_path()\n", + "namespace clang {} | clang::Offset clang::SourceLocation::get_offset()\n", + "namespace clang {} | void clang::SourceLocation::get_data(std::string *path, unsigned int *line, unsigned int *column, unsigned int *offset)\n", + "namespace clang {} | clang::Index::Index(int excludeDeclarationsFromPCH, int displayDiagnostics)\n", + "namespace clang {} | clang::Diagnostic::Diagnostic(int &cx_tu, int &cx_diagnostic)\n", + "namespace clang {} | static const std::string clang::Diagnostic::get_severity_spelling(unsigned int severity)\n", + "namespace clang {} | int clang::SourceRange::get_offsets()\n", + "...\n" + ] + } + ], + "source": [ + "for e in itertools.islice(loaded.get_graph().edges(), 10):\n", + " print(e[0], \" | \", e[1])\n", + "if len(loaded.get_graph().edges()) > 100:\n", + " print(\"...\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "('jacobdufault@gmail.com', 0.9810053091032119)\n", + "('jdufault@google.com', 0.012631085775342622)\n" + ] + } + ], + "source": [ + "for t in sorted(G.developer_devranks(0.85).items(), key=lambda t:t[1], reverse=True):\n", + " print(t)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "('namespace clang {}', 0.10416510691561254)\n", + "('struct IndexedFile {}', 0.07087220405735957)\n", + "('struct Location {}', 0.05555621850983644)\n", + "('namespace language_server_api {}', 0.037464685336337684)\n", + "('void indexDeclaration(int client_data, const int *decl)', 0.03472520932390782)\n", + "('struct FuncDefDefinitionData {}', 0.028895424093886634)\n", + "('void 
indexEntityReference(int client_data, const int *ref)', 0.02833920522734296)\n", + "('struct TypeDefDefinitionData {}', 0.027383332592175774)\n", + "('struct ParsingDatabase {}', 0.022854110362300492)\n", + "('std::string ToString(const int &document)', 0.02239638361525142)\n", + "('struct VarDefDefinitionData {}', 0.01947827810897668)\n", + "('int main(int argc, char **argv)', 0.019008147207863067)\n", + "('struct Task {}', 0.01884614618353126)\n", + "('struct IdCache {}', 0.015987407624868322)\n", + "('struct IpcMessage_DocumentSymbolsRequest {}', 0.013928011009341164)\n", + "('struct QueryableDatabase {}', 0.012919324401159222)\n", + "('void DiffDocuments(int &expected, int &actual)', 0.012611768871416688)\n", + "('struct IndexUpdate {}', 0.012378546855131626)\n", + "('struct FileDb {}', 0.01196728115668281)\n", + "('struct Ref {}', 0.01193202317742757)\n", + "('struct TypeDef {}', 0.011067953367303708)\n", + "('struct IndexedFuncDef', 0.010908566394791454)\n", + "('struct QueryableEntry {}', 0.010537160251545136)\n", + "('int split_string(const std::string &str, const std::string &delimiter)', 0.010378125173983973)\n", + "('struct IndexParam {}', 0.008533911695897674)\n", + "('std::string clang::Cursor::get_type_description() const', 0.008457797123827646)\n", + "('struct SymbolIdx {}', 0.008086007774187281)\n", + "('constexpr T &std::experimental::optional::value() const', 0.0076216112589732)\n", + "('struct IndexedTypeDef', 0.0071560895156070016)\n", + "('struct IndexedVarDef', 0.007133033715821756)\n", + "('int Location(bool interesting, FileId file_id, uint32_t line, uint32_t column)', 0.007066448628771181)\n", + "('enum class SymbolKind : int {}', 0.006912848701970875)\n", + "('ParsingDatabase Parse(std::string filename)', 0.0067913702697211965)\n", + "('struct BitFieldArray {}', 0.006366912671229168)\n", + "('struct Id {}', 0.006269151986432736)\n", + "('struct VarDef {}', 0.005896723758458707)\n", + "('std::string clang::Cursor::evaluate() const', 
0.005549030917220253)\n", + "('struct BitFieldMember {}', 0.005470120861961143)\n", + "('namespace boost {}', 0.0053367123489663605)\n", + "('struct CachedIndexedFile {}', 0.005335658634452737)\n", + "('void HandleFunc(ParsingDatabase *db, NamespaceStack *ns, int func, int declaring_type)', 0.0050395202572343775)\n", + "('struct TypeDef', 0.004942921362123896)\n", + "('struct FindChildOfKindParam {}', 0.004811707018465551)\n", + "('void QueryDbMainLoop(int *ipc, int *db)', 0.004635264126963808)\n", + "('void writer()', 0.004239041393394854)\n", + "('struct IpcDirectionalChannel {}', 0.004222606591724942)\n", + "('std::string IndexedFile::ToString()', 0.003914891670763644)\n", + "('void reader()', 0.0037276335662578255)\n", + "('struct IdMap', 0.003698909960007504)\n", + "('struct IdMap {}', 0.0036803694125616855)\n", + "('int IpcDirectionalChannel::TakeMessages()', 0.0036664616786350667)\n", + "('int VisitFuncDefinition(int cursor, int parent, FuncDefinitionParam *param)', 0.0034997936428615207)\n", + "('class Foo', 0.00343298027102756)\n", + "('int ResolveDeclToType(ParsingDatabase *db, int decl_cursor, bool is_interesting, const int *semantic_container, const int *lexical_container)', 0.0033863963771359707)\n", + "('IndexedFile Parse(std::string filename, int args, bool dump_ast)', 0.003382847159072873)\n", + "('struct IndexedTypeDef {}', 0.003350706789987754)\n", + "('int VisitFile(int cursor, int parent, FileParam *param)', 0.0032459392067467193)\n", + "('bool IsFunction(int kind)', 0.003175717188227563)\n", + "('void clang::TranslationUnit::parse(int &index, const std::string &file_path, const int &command_line_args, const int &buffers, unsigned int flags)', 0.0030653920792708656)\n", + "('struct FileDef {}', 0.0029827759084660936)\n", + "('struct FuncDefinitionParam {}', 0.002758694142937999)\n", + "('void VisitDeclForTypeUsageVisitorHandler(int cursor, VisitDeclForTypeUsageParam *param)', 0.0027553882048806048)\n", + "('struct IndexedFuncDef {}', 
0.002528391159191035)\n", + "('int clang::TranslationUnit::ReparseTranslationUnit(const std::string &buffer, unsigned int flags)', 0.002438888588616966)\n", + "('IndexUpdate::IndexUpdate(IndexedFile &previous, IndexedFile ¤t)', 0.002352122087390797)\n", + "('void WriteToFile(const std::string &filename, const std::string &content)', 0.0022592441357603276)\n", + "('bool clang::Cursor::is_valid_kind() const', 0.002171323403042084)\n", + "('int VisitClassDecl(int cursor, int parent, ClassDeclParam *param)', 0.0021582765039166282)\n", + "('struct QueryableFuncDef {}', 0.0020856582400619085)\n", + "('static unsigned int clang::TranslationUnit::DefaultFlags()', 0.002080886593957595)\n", + "('struct JsonMessage {}', 0.0020459402128250007)\n", + "('int AddDeclUsages(IndexedFile *db, int decl_cursor, bool is_interesting, const int *semantic_container, const int *lexical_container)', 0.00201270564335991)\n", + "('struct IpcDirectionalChannel::MessageBuffer', 0.0019993007028381057)\n", + "('int clang::TranslationUnit::get_tokens(unsigned int start_line, unsigned int start_column, unsigned int end_line, unsigned int end_column)', 0.001991386095292752)\n", + "('int clang::TranslationUnit::get_diagnostics()', 0.001969010970626541)\n", + "('std::string ToString()', 0.0019628876731040126)\n", + "('int Cursor::get_arguments() const', 0.0019476279951740321)\n", + "('struct ClassDeclParam {}', 0.0018981883361736543)\n", + "('int clang::TranslationUnit::get_tokens(unsigned int start_offset, unsigned int end_offset)', 0.0018616870295877258)\n", + "('struct BaseIpcMessage {}', 0.001852010555893617)\n", + "('struct VisitDeclForTypeUsageParam {}', 0.0018259155343039947)\n", + "('int VisitDeclForTypeUsageVisitor(int cursor, int parent, VisitDeclForTypeUsageParam *param)', 0.0017714792188222434)\n", + "('void HandleVarDecl(ParsingDatabase *db, NamespaceStack *ns, int var, int declaring_type, int func_id, bool declare_variable)', 0.001756397282995158)\n", + "('int VisitUsing(int cursor, int 
parent, UsingParam *param)', 0.0017391009245347117)\n", + "('struct UsrToIdResolver {}', 0.0017315288570247623)\n", + "('void TranslationUnit::parse(int &index, const std::string &file_path, const int &command_line_args, const int &buffers, unsigned int flags)', 0.0017228845992982237)\n", + "('struct BaseIpcMessage : BaseIpcMessageElided {}', 0.0017163981552008037)\n", + "('VarId IndexedFile::ToVarId(const std::string &usr)', 0.0017095020484755572)\n", + "('void Write(const int &strs)', 0.0016931594709985746)\n", + "('enum class Command : int {}', 0.001685452539998441)\n", + "('int mai2n(int argc, char **argv)', 0.0016762636027911257)\n", + "('std::string ParsingDatabase::ToString()', 0.0016742361305578815)\n", + "('IndexUpdate ComputeDiff(IdMap *id_map, IndexedFile &previous, IndexedFile ¤t)', 0.0016399275787473788)\n", + "('struct IndexedFileDb {}', 0.0016140476193728611)\n", + "('struct QueryableVarDef {}', 0.0016131194704164442)\n", + "('struct BaseIpcMessageElided {}', 0.00160707872062705)\n", + "('int main2(int argc, char **argv)', 0.0015949425203396002)\n", + "('struct FuncDef {}', 0.0015587623184678523)\n", + "('struct IndexedVarDef {}', 0.0015559247845952614)\n", + "('void LanguageServerMain(std::string process_name)', 0.001552187811482307)\n" + ] + } + ], + "source": [ + "for t in itertools.islice(sorted(G.function_devranks(0.85).items(), key=lambda t:t[1], reverse=True), 100):\n", + " print(t)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/lsp-ccls.ipynb b/notebooks/lsp-ccls.ipynb new file mode 100644 index 00000000000..2108a172c60 --- /dev/null +++ 
b/notebooks/lsp-ccls.ipynb @@ -0,0 +1,402 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "\n", + "logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(name)s] %(message)s',\n", + " level=logging.INFO)\n", + "logging.getLogger(\"persper.analytics.lsp_graph_server.callgraph.manager\").setLevel(logging.INFO)\n", + "# logging.getLogger('asyncio').setLevel(logging.CRITICAL)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Workspace root: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-02-16 23:15:41,552 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Exceptions.h.\n", + "2019-02-16 23:15:41,608 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\TextFileParsers.cpp.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Commit 4965d8edcb63cab6e544c1ecd19454f37d9bb0d3 (A): Going forward (initial commit).\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-02-16 23:15:41,663 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\TextFileParsers.h.\n", + "2019-02-16 23:15:41,720 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\TypeTraits.h.\n", + "2019-02-16 23:15:41,776 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.cpp.\n", + "2019-02-16 23:15:41,830 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified 
C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.h.\n", + "2019-02-16 23:15:41,887 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\main.cpp.\n", + "2019-02-16 23:15:41,941 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\stdafx.cpp.\n", + "2019-02-16 23:15:41,995 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\stdafx.h.\n", + "2019-02-16 23:15:41,998 INFO [persper.analytics.lsp_graph_server] Invalidated 9 files, affected 9 files.\n", + "2019-02-16 23:15:42,871 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\TextFileParsers.cpp\n", + "2019-02-16 23:15:43,083 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 86 branches.\n", + "2019-02-16 23:15:43,084 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.h\n", + "2019-02-16 23:15:43,339 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 66 branches.\n", + "2019-02-16 23:15:43,340 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\stdafx.cpp\n", + "2019-02-16 23:15:43,393 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 0 branches.\n", + "2019-02-16 23:15:43,394 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Exceptions.h\n", + "2019-02-16 23:15:43,497 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 18 branches.\n", + "2019-02-16 23:15:43,498 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\main.cpp\n", + 
"2019-02-16 23:15:43,559 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 4 branches.\n", + "2019-02-16 23:15:43,561 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\TextFileParsers.h\n", + "2019-02-16 23:15:43,715 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 39 branches.\n", + "2019-02-16 23:15:43,716 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\TypeTraits.h\n", + "2019-02-16 23:15:43,773 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 1 branches.\n", + "2019-02-16 23:15:43,774 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\stdafx.h\n", + "2019-02-16 23:15:43,830 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 1 branches.\n", + "2019-02-16 23:15:43,832 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.cpp\n", + "2019-02-16 23:15:43,950 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 27 branches.\n", + "2019-02-16 23:15:43,951 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 242 branches from 9 files.\n", + "2019-02-16 23:15:44,115 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility-1.cpp.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Commit 12a65d92071e8ab32890bc0f69697b2efcb013f8 (B): Going forward.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-02-16 23:15:44,224 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\main.cpp.\n", + "2019-02-16 23:15:44,226 INFO [persper.analytics.lsp_graph_server] 
Invalidated 3 files, affected 3 files.\n", + "2019-02-16 23:15:44,227 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Confirm deleted: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.cpp\n", + "2019-02-16 23:15:44,358 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\main.cpp\n", + "2019-02-16 23:15:44,423 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 5 branches.\n", + "2019-02-16 23:15:44,424 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility-1.cpp\n", + "2019-02-16 23:15:44,546 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 27 branches.\n", + "2019-02-16 23:15:44,547 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 32 branches from 2 files.\n", + "2019-02-16 23:15:45,728 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\TextFileParsers.cpp.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Commit 4928d2ec0ad82221b61b30f3ae2e1cc4c61a3ea0 (C): Going forward.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-02-16 23:15:45,853 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.cpp.\n", + "2019-02-16 23:15:45,968 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.h.\n", + "2019-02-16 23:15:46,083 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\main.cpp.\n", + "2019-02-16 23:15:46,085 INFO [persper.analytics.lsp_graph_server] Invalidated 5 files, affected 5 files.\n", + "2019-02-16 23:15:46,086 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Confirm 
deleted: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility-1.cpp\n", + "2019-02-16 23:15:46,489 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\TextFileParsers.cpp\n", + "2019-02-16 23:15:46,710 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 86 branches.\n", + "2019-02-16 23:15:46,711 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.h\n", + "2019-02-16 23:15:46,982 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 66 branches.\n", + "2019-02-16 23:15:46,984 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\main.cpp\n", + "2019-02-16 23:15:47,046 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 4 branches.\n", + "2019-02-16 23:15:47,047 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.cpp\n", + "2019-02-16 23:15:47,170 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 27 branches.\n", + "2019-02-16 23:15:47,171 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 183 branches from 4 files.\n", + "2019-02-16 23:15:47,338 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\TextFileParsers.cpp.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Commit 639934cdc7499854e22df79835240ac786498300 (D): Going forward.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2019-02-16 23:15:47,452 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.cpp.\n", + "2019-02-16 23:15:47,559 INFO [persper.analytics.lsp_graph_server.callgraph.builder] 
Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.h.\n", + "2019-02-16 23:15:47,665 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Modified C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\main.cpp.\n", + "2019-02-16 23:15:47,667 INFO [persper.analytics.lsp_graph_server] Invalidated 4 files, affected 4 files.\n", + "2019-02-16 23:15:48,070 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\TextFileParsers.cpp\n", + "2019-02-16 23:15:48,284 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 86 branches.\n", + "2019-02-16 23:15:48,285 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\main.cpp\n", + "2019-02-16 23:15:48,345 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 2 branches.\n", + "2019-02-16 23:15:48,346 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.h\n", + "2019-02-16 23:15:48,606 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 66 branches.\n", + "2019-02-16 23:15:48,608 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Build call graph in: C:\\Users\\CXy\\AppData\\Local\\Temp\\tmp8mocg_mh\\Utility.cpp\n", + "2019-02-16 23:15:48,726 INFO [persper.analytics.lsp_graph_server.callgraph.builder] Yielded 27 branches.\n", + "2019-02-16 23:15:48,727 INFO [persper.analytics.lsp_graph_server.callgraph.manager] Added 181 branches from 4 files.\n", + "2019-02-16 23:15:48,728 INFO [persper.analytics.lsp_graph_server] Shutting down language server...\n", + "2019-02-16 23:15:58,730 WARNING [persper.analytics.lsp_graph_server] Killed language server 24144.\n" + ] + } + ], + "source": [ + "from persper.analytics.analyzer2 import Analyzer\n", + "from persper.analytics.lsp_graph_server.ccls import CclsGraphServer\n", + "from 
persper.analytics.call_commit_graph import CallCommitGraph, CommitIdGenerators\n", + "from tempfile import mkdtemp\n", + "\n", + "dumpLogs = True\n", + "workspaceRoot = mkdtemp()\n", + "print(\"Workspace root: \", workspaceRoot)\n", + "G = None\n", + "async with CclsGraphServer(workspaceRoot, cacheRoot=\"./.ccls-cache\",\n", + " languageServerCommand=\"../bin/ccls\" + (\" -log-file=ccls.log\" if dumpLogs else \"\"),\n", + " dumpLogs=dumpLogs,\n", + " graph=CallCommitGraph(commit_id_generator=CommitIdGenerators.fromComment)) as graphServer:\n", + " analyzer = Analyzer(\"../repos/cpp_test_repo\", graphServer)\n", + " graphServer.reset_graph()\n", + " await analyzer.analyze()\n", + " G = analyzer.graph" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Nodes: 42\n", + "Edges: 15\n" + ] + } + ], + "source": [ + "print(\"Nodes:\", len(G.nodes()))\n", + "print(\"Edges:\", len(G.edges()))" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "int &operator>>(int &reader, long &rhs) | int &operator>>(int &reader, int &rhs)\n", + "int &operator>>(int &reader, float &rhs) | int &operator>>(int &reader, int &rhs)\n", + "int &operator>>(int &reader, double &rhs) | int &operator>>(int &reader, int &rhs)\n", + "int &operator>>(int &reader, bool &rhs) | int &operator>>(int &reader, int &rhs)\n", + "int &operator>>(int &reader, bool &rhs) | bool Equal(const int &lhs, const int &rhs, int comparision)\n", + "int &operator>>(int &reader, bool &rhs) | enum class StringComparison : int {}\n", + "bool Equal(const int &lhs, const int &rhs, StringComparison comparision) | bool Equal(const int &lhs, const int &rhs, int comparision)\n", + "bool Equal(const int &lhs, const int &rhs, StringComparison comparision) | enum class StringComparison : int {}\n", + 
"const char *FriendlyNameOf(const std::type_index &type) | const char *FriendlyNameOf()\n", + "const char *FriendlyNameOf(const type_info &type) | const char *FriendlyNameOf()\n", + "const char *FriendlyNameOf(const ReliabilityNetworkEntry &instance) | const char *FriendlyNameOf()\n", + "void ReportException(const int &ex, int level) | void ReportException(const int &ex, int level)\n", + "int main(int argc, char *argv[]) | TStream OpenAndValidate(const TPath arg1)\n", + "int main(int argc, char *argv[]) | void ReportException(const int &ex, int level)\n", + "bool Equal(const int &lhs, const int &rhs, int comparision) | enum class StringComparison : int {}\n" + ] + } + ], + "source": [ + "import itertools\n", + "for e in itertools.islice(G.edges(), 100):\n", + " print(e[0], \" | \", e[1])\n", + "if len(G.edges()) > 100:\n", + " print(\"...\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# un-pickle test\n", + "import pickle\n", + "loaded = pickle.load(open(\"cpp_test_repo-finished-0.pickle\", \"rb\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "RowReader &operator>>(RowReader &reader, int &rhs) | RowReader &operator>>(RowReader &reader, std::string &rhs)\n", + "RowReader &operator>>(RowReader &reader, long &rhs) | RowReader &operator>>(RowReader &reader, int &rhs)\n", + "RowReader &operator>>(RowReader &reader, long &rhs) | RowReader &operator>>(RowReader &reader, std::string &rhs)\n", + "RowReader &operator>>(RowReader &reader, float &rhs) | RowReader &operator>>(RowReader &reader, int &rhs)\n", + "RowReader &operator>>(RowReader &reader, float &rhs) | RowReader &operator>>(RowReader &reader, std::string &rhs)\n", + "RowReader &operator>>(RowReader &reader, double &rhs) | RowReader &operator>>(RowReader &reader, int &rhs)\n", + "RowReader &operator>>(RowReader &reader, double &rhs) | 
RowReader &operator>>(RowReader &reader, std::string &rhs)\n", + "RowReader &operator>>(RowReader &reader, bool &rhs) | RowReader &operator>>(RowReader &reader, int &rhs)\n", + "RowReader &operator>>(RowReader &reader, bool &rhs) | bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)\n", + "RowReader &operator>>(RowReader &reader, bool &rhs) | enum class StringComparison : int {}\n", + "RowReader &operator>>(RowReader &reader, bool &rhs) | RowReader &operator>>(RowReader &reader, std::string &rhs)\n", + "void ConfigurationParser::Load(std::istream &inputStream) | RowReader &operator>>(RowReader &reader, int &rhs)\n", + "int ConfigurationParser::GetInt(const std::string &key, int defaultValue) const | std::string ConfigurationParser::GetString(const std::string &key, const std::string &defaultValue) const\n", + "double ConfigurationParser::GetDouble(const std::string &key, double defaultValue) const | std::string ConfigurationParser::GetString(const std::string &key, const std::string &defaultValue) const\n", + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const | std::string ConfigurationParser::GetString(const std::string &key, const std::string &defaultValue) const\n", + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const | bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)\n", + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const | enum class StringComparison : int {}\n", + "ConfigurationParser::ConfigurationParser(std::istream &inputStream) | void ConfigurationParser::Load(std::istream &inputStream)\n", + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath) | TStream OpenAndValidate(const TPath arg1)\n", + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath) | void ConfigurationParser::Load(std::istream &inputStream)\n", + "bool Equal(const std::string 
&lhs, const std::string &rhs, StringComparison comparision) | bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)\n", + "bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision) | enum class StringComparison : int {}\n", + "bool Confirm(const std::string &prompt) | bool Confirm(const std::string &prompt)\n", + "const char *FriendlyNameOf(const std::type_index &type) | const char *FriendlyNameOf()\n", + "const char *FriendlyNameOf(const type_info &type) | const char *FriendlyNameOf()\n", + "const char *FriendlyNameOf(const ReliabilityNetworkEntry &instance) | const char *FriendlyNameOf()\n", + "void ReportException(const std::exception &ex, int level) | void ReportException(const std::exception &ex, int level)\n", + "int main(int argc, char *argv[]) | TStream OpenAndValidate(const TPath arg1)\n", + "int main(int argc, char *argv[]) | bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const\n", + "int main(int argc, char *argv[]) | double ConfigurationParser::GetDouble(const std::string &key, double defaultValue) const\n", + "int main(int argc, char *argv[]) | std::string ConfigurationParser::GetString(const std::string &key, const std::string &defaultValue) const\n", + "int main(int argc, char *argv[]) | void ReportException(const std::exception &ex, int level)\n" + ] + } + ], + "source": [ + "for e in itertools.islice(loaded.get_graph().edges(), 100):\n", + " print(e[0], \" | \", e[1])\n", + "if len(loaded.get_graph().edges()) > 100:\n", + " print(\"...\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "No traceback available to show.\n" + ] + } + ], + "source": [ + "%tb" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "----" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": 
"stdout", + "output_type": "stream", + "text": [ + "('void ReportException(const int &ex, int level)', 0.49527330555396226)\n", + "('enum class StringComparison : int {}', 0.1145983519196816)\n", + "('bool Equal(const int &lhs, const int &rhs, int comparision)', 0.09938796985042238)\n", + "('int &operator>>(int &reader, bool &rhs)', 0.061196080770132576)\n", + "('int &operator>>(int &reader, int &rhs)', 0.04710340714736633)\n", + "('int main(int argc, char *argv[])', 0.04207230552946614)\n", + "('bool Confirm(const int &prompt)', 0.03251041790913293)\n", + "('int &operator>>(int &reader, long &rhs)', 0.011474265144399858)\n", + "('int &operator>>(int &reader, float &rhs)', 0.011474265144399858)\n", + "('int &operator>>(int &reader, double &rhs)', 0.011474265144399858)\n", + "('inline int to_string(const int &value)', 0.007649510096266572)\n", + "('const char *FriendlyNameOf()', 0.006788991932293897)\n", + "('TStream OpenAndValidate(const TPath arg1)', 0.0035379156601423947)\n", + "('ANSI_COLOR_RED', 0.001912377524066643)\n", + "('ANSI_COLOR_GREEN', 0.001912377524066643)\n", + "('ANSI_COLOR_YELLOW', 0.001912377524066643)\n", + "('ANSI_COLOR_BLUE', 0.001912377524066643)\n", + "('ANSI_COLOR_MAGENTA', 0.001912377524066643)\n", + "('ANSI_COLOR_CYAN', 0.001912377524066643)\n", + "('ANSI_COLOR_BRIGHT', 0.001912377524066643)\n", + "('ANSI_COLOR_RESET', 0.001912377524066643)\n", + "('bool dynamic_kind_of(const TSrc *obj)', 0.001912377524066643)\n", + "('bool pointer_kind_of(const int obj)', 0.001912377524066643)\n", + "('TDest safe_cast(TSrc obj)', 0.001912377524066643)\n", + "('int safe_pointer_cast(const int &obj)', 0.001912377524066643)\n", + "('int StreamStatusToString(const TStream &stream)', 0.001912377524066643)\n", + "('void ValidateStream(const TStream &stream)', 0.001912377524066643)\n", + "('_RangeToEnumerable RangeToEnumerable(const int range)', 0.001912377524066643)\n", + "('bool Equal(const int &lhs, const int &rhs, StringComparison comparision)', 
0.001912377524066643)\n", + "('_RE_TRACE', 0.001912377524066643)\n", + "('const char *FriendlyNameOf(const std::type_index &type)', 0.001912377524066643)\n", + "('const char *FriendlyNameOf(const type_info &type)', 0.001912377524066643)\n", + "('const char *FriendlyNameOf(const ReliabilityNetworkEntry &instance)', 0.001912377524066643)\n", + "('TStream &operator>>(TStream &s, RowReader &reader)', 0.001912377524066643)\n", + "('RowReader &operator>>(RowReader &reader, int &rhs)', 0.001912377524066643)\n", + "('RowReader &operator>>(RowReader &reader, long &rhs)', 0.001912377524066643)\n", + "('RowReader &operator>>(RowReader &reader, float &rhs)', 0.001912377524066643)\n", + "('RowReader &operator>>(RowReader &reader, double &rhs)', 0.001912377524066643)\n", + "('RowReader &operator>>(RowReader &reader, bool &rhs)', 0.001912377524066643)\n", + "('_SILENCE_STDEXT_ALLOCATORS_DEPRECATION_WARNING', 0.001912377524066643)\n", + "('_DECLARE_ENUM', 0.001912377524066643)\n", + "('_DECLARE_ENUM_DEFAULT', 0.001912377524066643)\n" + ] + } + ], + "source": [ + "for t in sorted(G.function_devranks(0.85).items(), key=lambda t:t[1], reverse=True):\n", + " print(t)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/persper/analytics/analyzer.py b/persper/analytics/analyzer.py index f7a82f20e14..93e28004319 100644 --- a/persper/analytics/analyzer.py +++ b/persper/analytics/analyzer.py @@ -1,8 +1,11 @@ import os import time import pickle -from persper.analytics.git_tools import get_contents, _diff_with_first_parent +import asyncio +from persper.analytics.git_tools import get_contents, 
diff_with_first_parent, initialize_repo from persper.analytics.iterator import RepoIterator +from abc import ABC +from git import Commit def print_overview(commits, branch_commits): @@ -59,10 +62,24 @@ class Analyzer: def __init__(self, repo_path, graph_server): self._graph_server = graph_server + self._repo_path = repo_path self._ri = RepoIterator(repo_path) + self._repo = initialize_repo(repo_path) self._ccgraph = None + self._observer: AnalyzerObserver = emptyAnalyzerObserver - def analyze(self, rev=None, + @property + def observer(self): + """ + The AnalyzerObserver used to observe current Analyzer. + """ + return self._observer + + @observer.setter + def observer(self, value): + self._observer = value or emptyAnalyzerObserver + + async def analyze(self, rev=None, from_beginning=False, num_commits=None, continue_iter=False, @@ -93,23 +110,27 @@ def analyze(self, rev=None, for idx, commit in enumerate(reversed(commits), 1): phase = 'main' print_commit_info(phase, idx, commit, start_time, verbose) - self.analyze_master_commit(commit) + self._observer.onBeforeCommit(self, idx, commit, True) + await self.analyze_master_commit(commit) + self._observer.onAfterCommit(self, idx, commit, True) self.autosave(phase, idx, checkpoint_interval) for idx, commit in enumerate(branch_commits, 1): phase = 'branch' print_commit_info(phase, idx, commit, start_time, verbose) - self.analyze_branch_commit(commit) + self._observer.onBeforeCommit(self, idx, commit, False) + await self.analyze_branch_commit(commit) + self._observer.onAfterCommit(self, idx, commit, False) self.autosave(phase, idx, checkpoint_interval) self.autosave('finished', 0, 1) - def _analyze_commit(self, commit, server_func): + async def _analyze_commit(self, commit, server_func): self._graph_server.register_commit(commit.hexsha, commit.author.name, commit.author.email, commit.message) - diff_index = _diff_with_first_parent(commit) + diff_index = diff_with_first_parent(self._repo, commit) for diff in diff_index: 
old_fname, new_fname = _get_fnames(diff) @@ -128,21 +149,28 @@ def _analyze_commit(self, commit, server_func): if old_fname: old_src = get_contents( - self._ri.repo, commit.parents[0], old_fname) + self._repo, commit.parents[0], old_fname) if new_fname: - new_src = get_contents(self._ri.repo, commit, new_fname) + new_src = get_contents(self._repo, commit, new_fname) if old_src or new_src: # todo (hezheng) store the status somewhere for reporting later - status = server_func(old_fname, old_src, new_fname, new_src, diff.diff) + result = server_func(old_fname, old_src, new_fname, new_src, diff.diff) + if asyncio.iscoroutine(result): + result = await result + status = result - def analyze_master_commit(self, commit): - self._analyze_commit(commit, self._graph_server.update_graph) + result = self._graph_server.end_commit(commit.hexsha) + if asyncio.iscoroutine(result): + result = await result + + async def analyze_master_commit(self, commit): + await self._analyze_commit(commit, self._graph_server.update_graph) # todo (hezheng) implement correct analysis for branches - def analyze_branch_commit(self, commit): - self._analyze_commit(commit, self._graph_server.update_graph) + async def analyze_branch_commit(self, commit): + await self._analyze_commit(commit, self._graph_server.update_graph) def reset_state(self): self._ccgraph = None @@ -157,6 +185,58 @@ def save(self, fname): def autosave(self, phase, idx, checkpoint_interval): if idx % checkpoint_interval == 0: - repo_name = os.path.basename(self._ri.repo_path.rstrip('/')) + repo_name = os.path.basename(self._repo_path.rstrip('/')) fname = repo_name + '-' + phase + '-' + str(idx) + '.pickle' self.save(fname) + + def __getstate__(self): + state = self.__dict__.copy() + state.pop("_observer", None) + return state + + def __setstate__(self, state): + self.__dict__.update(state) + + +class AnalyzerObserver(ABC): + """ + Used to observe the progress of `Analyzer` during its analysis of the target repository. 
+ You need to derive your own observer class from it before assigning your observer instance + to `Analyzer.observer`. + """ + def __init__(self): + pass + + def onBeforeCommit(self, analyzer:Analyzer, index:int, commit:Commit, isMaster:bool): + """ + Called before the observed Analyzer is about to analyze a commit. + Params: + analyzer: the observed Analyzer instance. + index: the index of the commit, depending on the behavior of the analyzer. + This is usually a series of 1-based ordinal index for master commits, + and another series of 1-based ordinal index for branch commits. + commit: the commit to be analyzed. + isMaster: whether the current commit is one of the master commits. + """ + pass + + def onAfterCommit(self, analyzer:Analyzer, index:int, commit:Commit, isMaster:bool): + """ + Called after the observed Analyzer has finished analyzing a commit. + Params: + analyzer: the observed Analyzer instance. + index: the index of the commit, depending on the behavior of the analyzer. + This is usually a series of 1-based ordinal index for master commits, + and another series of 1-based ordinal index for branch commits. + commit: the commit that has just been analyzed. + isMaster: whether the current commit is one of the master commits. + """ + pass + +class _EmptyAnalyzerObserverType(AnalyzerObserver): + pass + +emptyAnalyzerObserver = _EmptyAnalyzerObserverType() +""" +An AnalyzerObserver instance that does nothing in their notification methods. 
+""" diff --git a/persper/analytics/analyzer2.py b/persper/analytics/analyzer2.py new file mode 100644 index 00000000000..c670ed74bd5 --- /dev/null +++ b/persper/analytics/analyzer2.py @@ -0,0 +1,302 @@ +import asyncio +import collections.abc +from abc import ABC +from typing import List, Optional, Set, Union + +from git import Commit, Diff, DiffIndex, Repo + +from persper.analytics.commit_classifier import CommitClassifier +from persper.analytics.git_tools import diff_with_commit, get_contents +from persper.analytics.graph_server import CommitSeekingMode, GraphServer +from persper.analytics.score import commit_overall_scores + + +class Analyzer: + def __init__(self, repositoryRoot: str, graphServer: GraphServer, + terminalCommit: str = "master", + firstParentOnly: bool = False, + commit_classifier: Optional[CommitClassifier] = None): + self._repositoryRoot = repositoryRoot + self._graphServer = graphServer + self._repo = Repo(repositoryRoot) + self._originCommit: Commit = None + self._terminalCommit: Commit = self._repo.rev_parse(terminalCommit) + self._firstParentOnly = firstParentOnly + self._visitedCommits = set() + self._s_visitedCommits = _ReadOnlySet(self._visitedCommits) + self._observer: AnalyzerObserver = emptyAnalyzerObserver + self._commit_classifier = commit_classifier + self._clf_results: Dict[str, List[float]] = {} + + def __getstate__(self): + state = self.__dict__.copy() + state.pop("_repo", None) + state.pop("_s_visitedCommits", None) + state["_originCommit"] = self._originCommit.hexsha if self._originCommit else None + state["_terminalCommit"] = self._terminalCommit.hexsha if self._terminalCommit else None + state.pop("_observer", None) + return state + + def __setstate__(self, state): + self.__dict__.update(state) + self._repo = Repo(self._repositoryRoot) + self.originCommit = state["_originCommit"] + self.terminalCommit = state["_terminalCommit"] + self._s_visitedCommits = _ReadOnlySet(self._visitedCommits) + + @property + def 
graphServer(self): + return self._graphServer + + @property + def observer(self): + """ + The AnalyzerObserver used to observe current Analyzer. + """ + return self._observer + + @observer.setter + def observer(self, value): + self._observer = value or emptyAnalyzerObserver + + @property + def originCommit(self): + """ + Gets/sets the first commit to visit. (exclusive) + Use None to start visiting from the first commit. + """ + return self._originCommit + + @originCommit.setter + def originCommit(self, value: Union[Commit, str]): + self._originCommit = self._repo.commit(value) if value else None + + @property + def terminalCommit(self): + """ + Gets/sets the last commit to visit. (inclusive) + """ + return self._terminalCommit + + @terminalCommit.setter + def terminalCommit(self, value: Union[Commit, str]): + self._terminalCommit = self._repo.commit(value) + + @property + def firstParentOnly(self): + """ + Whether to only visit each commit's first parent. + This is useful if you are only interested in the topical branch. + """ + return self._firstParentOnly + + @firstParentOnly.setter + def firstParentOnly(self, value: bool): + self._firstParentOnly = value + + @property + def graph(self): + return self._graphServer.get_graph() + + @property + def visitedCommits(self) -> Set[str]: + """ + Gets a set of visited commits, identified by their their SHA. + """ + return self._s_visitedCommits + + def compute_commit_scores(self, alpha: float, label_weights: List[float], + top_one=False): + """ + Compute the overall scores for all commits by combining DevRank and + commit classification. + """ + return commit_overall_scores(self.graph.commit_devranks(alpha), + self._clf_results, + label_weights, + top_one=top_one) + + async def analyze(self, maxAnalyzedCommits=None): + graphServerLastCommit: str = None + commitSpec = self._terminalCommit + if self._originCommit: + commitSpec = self._originCommit.hexsha + ".." 
+ self._terminalCommit.hexsha + + analyzedCommits = 0 + for commit in self._repo.iter_commits(commitSpec, + topo_order=True, reverse=True, first_parent=self._firstParentOnly): + def printCommitStatus(status: str): + message = commit.message.strip()[:32] + # note the commit # here only indicates the ordinal of current commit in current analysis session + print("Commit #{0} {1} ({2}): {3}".format(analyzedCommits, commit.hexsha, message, status)) + + if maxAnalyzedCommits and analyzedCommits >= maxAnalyzedCommits: + print("Max analyzed commits reached.") + break + if commit.hexsha in self._visitedCommits: + printCommitStatus("Already visited.") + continue + if len(commit.parents) > 1: + # merge commit + # process connection, do not process diff + printCommitStatus("Going forward (merge).") + if self._firstParentOnly: + assert graphServerLastCommit == commit.parents[0].hexsha, \ + "git should traverse along first parent, but actually not." + await self._analyzeCommit(commit, graphServerLastCommit, CommitSeekingMode.NormalForward) + else: + await self._analyzeCommit(commit, graphServerLastCommit, CommitSeekingMode.MergeCommit) + elif not commit.parents: + printCommitStatus("Going forward (initial commit).") + await self._analyzeCommit(commit, None, CommitSeekingMode.NormalForward) + else: + parent: Commit = commit.parents[0] + if graphServerLastCommit != parent.hexsha: + printCommitStatus( + "Rewind to parent: {0}.".format(parent.hexsha)) + # jumping to the parent commit first + await self._analyzeCommit(parent, graphServerLastCommit, CommitSeekingMode.Rewind) + # then go on with current commit + printCommitStatus("Going forward.") + await self._analyzeCommit(commit, parent, CommitSeekingMode.NormalForward) + self._visitedCommits.add(commit.hexsha) + graphServerLastCommit = commit.hexsha + analyzedCommits += 1 + + async def _analyzeCommit(self, commit: Union[Commit, str], parentCommit: Union[Commit, str], + seekingMode: CommitSeekingMode): + """ + parentCommit can be 
None. + """ + if type(commit) != Commit: + commit = self._repo.commit(commit) + + self._observer.onBeforeCommit(self, commit, seekingMode) + result = self._graphServer.start_commit(commit.hexsha, seekingMode, + commit.author.name, commit.author.email, commit.message) + if asyncio.iscoroutine(result): + await result + diff_index = diff_with_commit(self._repo, commit, parentCommit) + + # commit classification + if self._commit_classifier and commit.hexsha not in self._clf_results: + prob = self._commit_classifier.predict(commit, diff_index) + self._clf_results[commit.hexsha] = prob + + for diff in diff_index: + old_fname, new_fname = _get_fnames(diff) + # apply filter + # if a file comes into/goes from our view, we will set corresponding old_fname/new_fname to None, + # as if the file is introduced/removed in this commit. + # However, the diff will keep its original, no matter if the file has been filtered in/out. + if old_fname and not self._graphServer.filter_file(old_fname): + old_fname = None + if new_fname and not self._graphServer.filter_file(new_fname): + new_fname = None + if not old_fname and not new_fname: + # no modification + continue + + old_src = new_src = None + + if old_fname: + old_src = get_contents(self._repo, parentCommit, old_fname) + + if new_fname: + new_src = get_contents(self._repo, commit, new_fname) + + if old_src or new_src: + result = self._graphServer.update_graph( + old_fname, old_src, new_fname, new_src, diff.diff) + if asyncio.iscoroutine(result): + await result + + result = self._graphServer.end_commit(commit.hexsha) + if asyncio.iscoroutine(result): + await result + self._observer.onAfterCommit(self, commit, seekingMode) + + +def _get_fnames(diff: Diff): + if diff.new_file: + # change type 'A' + old_fname = None + new_fname = diff.b_blob.path + elif diff.deleted_file: + # change type 'D' + old_fname = diff.a_blob.path + new_fname = None + elif diff.renamed: + # change type 'R' + old_fname = diff.rename_from + new_fname = 
diff.rename_to + elif (diff.a_blob and diff.b_blob and + (diff.a_blob != diff.b_blob)): + # change type 'M' + old_fname = new_fname = diff.b_blob.path + else: + # change type 'U' + return None, None + + return old_fname, new_fname + + +class AnalyzerObserver(ABC): + """ + Used to observe the progress of `Analyzer` during its analysis of the target repository. + You need to derive your own observer class from it before assigning your observer instance + to `Analyzer.observer`. + """ + + def __init__(self): + pass + + def onBeforeCommit(self, analyzer: Analyzer, commit: Commit, seeking_mode: CommitSeekingMode): + """ + Called before the observed Analyzer is about to analyze a commit. + Params: + analyzer: the observed Analyzer instance. + index: the index of the commit, depending on the behavior of the analyzer. + This is usually a series of 1-based ordinal index for master commits, + and another series of 1-based ordinal index for branch commits. + commit: the commit to be analyzed. + isMaster: whether the current commit is one of the master commits. + """ + pass + + def onAfterCommit(self, analyzer: Analyzer, commit: Commit, seeking_mode: CommitSeekingMode): + """ + Called after the observed Analyzer has finished analyzing a commit. + Params: + analyzer: the observed Analyzer instance. + index: the index of the commit, depending on the behavior of the analyzer. + This is usually a series of 1-based ordinal index for master commits, + and another series of 1-based ordinal index for branch commits. + commit: the commit that has just been analyzed. + isMaster: whether the current commit is one of the master commits. + """ + pass + + +class _EmptyAnalyzerObserverType(AnalyzerObserver): + pass + + +emptyAnalyzerObserver = _EmptyAnalyzerObserverType() +""" +An AnalyzerObserver instance that does nothing in their notification methods. 
+""" + + +class _ReadOnlySet(collections.abc.Set): + def __init__(self, underlyingSet: collections.abc.Set): + self._underlyingSet = underlyingSet + + def __contains__(self, x): + return x in self._underlyingSet + + def __len__(self): + return len(self._underlyingSet) + + def __iter__(self): + return self._underlyingSet.__iter__() diff --git a/persper/analytics/another_patch_parser.py b/persper/analytics/another_patch_parser.py new file mode 100644 index 00000000000..cb735c164d3 --- /dev/null +++ b/persper/analytics/another_patch_parser.py @@ -0,0 +1,65 @@ +import re + +_hunkHeader = re.compile( + r"^@@\s*\-(?P\d+),\s*\d+\s+\+(?P\d+),\s*\d+\s*@@") + + +def parseUnifiedDiff(diffContent: str, lineNumberOffset: int = 0): + """ + Parse unified diff content, and return the ranges of added and removed lines. + Returns + (addedRanges, removedRanges), where + addedRanges: a list of line ranges [[startLine1, endLine1], ...] added into the new file, + using the 1-based line numbers in the new file. + removedRanges: a list of line ranges [[startLine1, endLine1], ...] removed from the old file, + using the 1-based line numbers in the old file. 
+ """ + leftLine: int = None + rightLine: int = None + addedRanges = [] + removedRanges = [] + lastAddedRange: list = None + lastRemovedRange: list = None + isInPrologue = True + for diffLine in diffContent.rstrip("\r\n\v").split("\n"): + if diffLine.startswith("@@"): + match = _hunkHeader.search(diffLine) + if not match: + if isInPrologue: + continue + raise ValueError(str.format( + "Invalid diff line: {0}.", diffLine)) + leftLine = int(match.group("LN")) + lineNumberOffset + rightLine = int(match.group("RN")) + lineNumberOffset + lastAddedRange = lastRemovedRange = None + isInPrologue = False + elif diffLine.startswith(" "): + assert leftLine != None and rightLine != None + leftLine += 1 + rightLine += 1 + lastAddedRange = lastRemovedRange = None + elif diffLine.startswith("-"): + assert leftLine != None and rightLine != None + if lastRemovedRange: + lastRemovedRange[1] = leftLine + else: + lastRemovedRange = [leftLine, leftLine] + removedRanges.append(lastRemovedRange) + leftLine += 1 + elif diffLine.startswith("+"): + assert leftLine != None and rightLine != None + if lastAddedRange: + lastAddedRange[1] = rightLine + else: + lastAddedRange = [rightLine, rightLine] + addedRanges.append(lastAddedRange) + rightLine += 1 + elif diffLine.startswith("\\"): + # \ No newline at end of file + # Do nothing. We ignore blank lines. 
+ pass + else: + if isInPrologue: + continue + raise ValueError(str.format("Invalid diff line: {0}.", diffLine)) + return addedRanges, removedRanges diff --git a/persper/analytics/c.py b/persper/analytics/c.py index 2f5a8f29fa7..e6897bafa78 100644 --- a/persper/analytics/c.py +++ b/persper/analytics/c.py @@ -1,6 +1,6 @@ import re from persper.analytics.inverse_diff import inverse_diff -from persper.analytics.srcml import transform_src_to_tree +from persper.analytics.srcml import src_to_tree from persper.analytics.call_graph.c import update_graph, get_func_ranges_c from persper.analytics.detect_change import get_changed_functions from persper.analytics.patch_parser import PatchParser @@ -8,6 +8,35 @@ from persper.analytics.call_commit_graph import CallCommitGraph +def function_change_stats(old_ast, new_ast, patch, patch_parser, ranges_func): + """ + Parse old/new source files and extract the change info for all functions + """ + adds, dels = patch_parser(patch) + + forward_stats = {} + bckward_stats = {} + + if old_ast is not None: + forward_stats = get_changed_functions( + *ranges_func(old_ast), adds, dels, separate=True) + + if new_ast is not None: + inv_adds, inv_dels = inverse_diff(adds, dels) + bckward_stats = get_changed_functions( + *ranges_func(new_ast), inv_adds, inv_dels, separate=True) + + # merge forward and backward stats + for func, fstat in bckward_stats.items(): + if func not in forward_stats: + forward_stats[func] = { + 'adds': fstat['dels'], + 'dels': fstat['adds'] + } + + return forward_stats + + class CGraphServer(GraphServer): def __init__(self, filename_regex_strs): self._ccgraph = CallCommitGraph() @@ -21,30 +50,28 @@ def register_commit(self, hexsha, author_name, author_email, def update_graph(self, old_filename, old_src, new_filename, new_src, patch): ast_list = [] - forward_stats = {} - bckward_stats = {} - adds, dels = self._parse_patch(patch) + old_ast = None + new_ast = None + # Parse source codes into ASTs if old_src: - old_ast = 
transform_src_to_tree(old_src) + old_ast = src_to_tree(old_filename, old_src) if old_ast is None: return -1 - forward_stats = get_changed_functions( - *get_func_ranges_c(old_ast), adds, dels) - if new_src: - new_ast = transform_src_to_tree(new_src) + new_ast = src_to_tree(new_filename, new_src) if new_ast is None: return -1 - ast_list = [new_ast] - inv_adds, inv_dels = inverse_diff(adds, dels) - bckward_stats = get_changed_functions( - *get_func_ranges_c(new_ast), inv_adds, inv_dels) - bckward_stats.update(forward_stats) - update_graph(self._ccgraph, ast_list, bckward_stats) + # Compute function change stats + change_stats = function_change_stats(old_ast, new_ast, patch, + self._parse_patch, + get_func_ranges_c) + + # Update call-commit graph + update_graph(self._ccgraph, ast_list, change_stats) return 0 def get_graph(self): diff --git a/persper/analytics/call_commit_graph.py b/persper/analytics/call_commit_graph.py index e2ad3d367d5..6d3db3bb0f1 100644 --- a/persper/analytics/call_commit_graph.py +++ b/persper/analytics/call_commit_graph.py @@ -1,79 +1,132 @@ +""" +call_commit_graph.py +==================================== +CallCommitGraph stores all relevant analysis results +""" import networkx as nx from networkx.readwrite import json_graph from persper.analytics.devrank import devrank +from persper.analytics.score import normalize +from typing import Union, Set, List, Dict, Optional -def normalize(devranks): - normalized_devranks = {} - dr_sum = 0 - for _, dr in devranks.items(): - dr_sum += dr +class CommitIdGenerators: + @staticmethod + def fromOrdinal(ordinal: int, hexsha: str, message: str): + return ordinal - for idx in devranks: - normalized_devranks[idx] = devranks[idx] / dr_sum - return normalized_devranks + @staticmethod + def fromComment(ordinal: int, hexsha: str, message: str): + return message.strip() + @staticmethod + def fromHexsha(ordinal: int, hexsha: str, message: str): + return hexsha -class CallCommitGraph: - def __init__(self, 
node_link_data=None): - if node_link_data: - self._digraph = json_graph.node_link_graph(node_link_data) +class CallCommitGraph: + """ + The key data structure that stores all functions' call relationships + and edit histories across commits. + """ + + def __init__(self, graph_data: Optional[Dict] = None, commit_id_generator=CommitIdGenerators.fromHexsha): + if graph_data: + self._digraph = json_graph.node_link_graph( + CallCommitGraph._to_networkx_format(graph_data)) else: - self._digraph = nx.DiGraph(commitList=[]) + self._digraph = self._new_graph() + self._commit_id_generator = commit_id_generator + self._current_commit_id = None + + @staticmethod + def _to_networkx_format(graph_data: Dict) -> Dict: + graph_data['multigraph'] = False + graph_data['directed'] = True + for node in graph_data['nodes']: + node['files'] = set(node['files']) + return graph_data + + def reset(self): + """Reset all internal states""" + self._digraph = self._new_graph() + + def _new_graph(self): + """Create a new nx.DiGraph for underlying storage + with appropriate arguments""" + return nx.DiGraph(commits={}) - # Read-only access def nodes(self, data=False): + """Provide read-only access for nodes""" return self._digraph.nodes(data=data) - # Read-only access def edges(self, data=False): + """Provide read-only access for edges""" return self._digraph.edges(data=data) - # Read-only access def commits(self): + """Provide read-only access for commits""" # https://networkx.github.io/documentation/stable/tutorial.html#graph-attributes - return self._digraph.graph['commitList'] + return self._digraph.graph['commits'] + + def files(self, node: str) -> Set[str]: + """Provide read-only access to `files` attribute of a node""" + return self.nodes()[node]['files'] + + def __contains__(self, node): + """Implement membership check""" + return node in self._digraph - def add_commit(self, hexsha, author_name, author_email, commit_message): - self._digraph.graph['commitList'].append({ - 'hexsha': hexsha, 
'authorName': author_name, - 'authorEmail': author_email, 'message': commit_message - }) + def add_commit(self, hexsha, author_name, author_email, message): + # TODO: remove `id` in a commit object + self._current_commit_id = self._commit_id_generator(self._next_cindex(), hexsha, message) + self._digraph.graph['commits'][hexsha] = { + 'id': self._current_commit_id, + 'hexsha': hexsha, + 'authorName': author_name, + 'authorEmail': author_email, + 'message': message + } # The index of the commit being analyzed def _cur_cindex(self): return len(self.commits()) - 1 - def reset(self): - self._digraph = nx.DiGraph(commitList=[]) + def _next_cindex(self): + return self._cur_cindex() + 1 - def __contains__(self, node): - return node in self._digraph - - def add_node(self, node): - self._digraph.add_node(node, size=None, history={}) + # TODO: remove the default value of files + def add_node(self, node: str, files: Union[Set[str], List[str]] = []): + self._digraph.add_node(node, size=None, history={}, files=set(files)) # add_node must be called on source and target first def add_edge(self, source, target): + if source not in self._digraph: + raise ValueError("Error: caller %s does not exist in call-commit graph." % source) + if target not in self._digraph: + raise ValueError("Error: callee %s does not exist in call-commit graph." 
% target) self._digraph.add_edge(source, target, - addedBy=self._cur_cindex(), + addedBy=self._current_commit_id, weight=None) - def update_node_history(self, node, size): - # Use current commit index - cc_idx = self._cur_cindex() + def update_node_history(self, node, num_adds, num_dels): node_history = self._get_node_history(node) - # A commit might update a node's history more than once - if cc_idx in node_history: - node_history[cc_idx] += size + # A commit might update a node's history more than once when + # a single FunctionNode corresponds to more than one actual functions + if self._current_commit_id in node_history: + node_history[self._current_commit_id]['adds'] += num_adds + node_history[self._current_commit_id]['dels'] += num_dels else: - node_history[cc_idx] = size + node_history[self._current_commit_id] = {'adds': num_adds, 'dels': num_dels} # read/write access to node history are thourgh this function - def _get_node_history(self, node): + def _get_node_history(self, node: str) -> Dict[str, Dict[str, int]]: return self._digraph.nodes[node]['history'] + def update_node_files(self, node: str, new_files: Union[Set[str], List[str]]): + self._digraph.nodes[node]['files'] = set(new_files) + + # TODO: provide other options for computing a node's size def _set_all_nodes_size(self, black_set=None): """ Compute node size after nodes have been added to the graph node size is currently defined as the total number lines of edits @@ -84,12 +137,12 @@ def _set_all_nodes_size(self, black_set=None): node_history = self._get_node_history(node) if black_set is not None: size = 0 - for cindex, csize in node_history.items(): - sha = self.commits()[cindex]['hexsha'] + for cid, chist in node_history.items(): + sha = self.commits()[cid]['hexsha'] if sha not in black_set: - size += csize + size += (chist['adds'] + chist['dels']) else: - size = sum(node_history.values()) + size = sum([chist['adds'] + chist['dels'] for chist in node_history.values()]) # set default size to 1 
to avoid zero division error if size == 0: @@ -130,8 +183,9 @@ def commit_devranks(self, alpha, black_set=None): if len(history) == 0: continue - for cindex, csize in history.items(): - sha = self.commits()[cindex]['hexsha'] + for cid, chist in history.items(): + csize = chist['adds'] + chist['dels'] + sha = self.commits()[cid]['hexsha'] if black_set is None or sha not in black_set: dr = (csize / size) * func_devranks[func] if sha in commit_devranks: @@ -150,7 +204,7 @@ def developer_devranks(self, alpha, black_set=None): developer_devranks = {} commit_devranks = self.commit_devranks(alpha, black_set=black_set) - for commit in self.commits(): + for commit in self.commits().values(): sha = commit['hexsha'] email = commit['authorEmail'] diff --git a/persper/analytics/call_graph/c.py b/persper/analytics/call_graph/c.py index dca03678b02..18d84449ede 100644 --- a/persper/analytics/call_graph/c.py +++ b/persper/analytics/call_graph/c.py @@ -1,50 +1,56 @@ -import networkx as nx -from persper.graphs.call_graph.utils import remove_edges_of_node, ns, line_attr +from persper.analytics.call_graph.utils import ns, line_attr +from typing import Set class NotFunctionCallError(Exception): """Raise for false positive nodes""" -def handle_function(func_node): +def _handle_function(func_node): """Given a node, return function name and function range (start & end lineno)""" - + # function name name_node = func_node.find('srcml:name', ns) - func_name, start_line = handle_name(name_node) + func_name, start_line = _handle_function_name(name_node) if not func_name or not start_line: - print('Function name/start not found!') # very unlikely to happen + print('ERROR: _handle_function fails to extract name or location.') return None, None, None + # function body block_node = func_node.find('srcml:block', ns) if block_node is None: try: block_node = func_node.xpath('./following-sibling::srcml:block', namespaces=ns)[0] except: - print("Block node not found (in func {})".format(func_name)) - 
return func_name, None, None + print("ERROR: %s has no block_node." % func_name) + return func_name, start_line, None try: pos_node = block_node.find('pos:position', ns) end_line = int(pos_node.attrib[line_attr]) except: - print("Block node doesn't have position node inside!") - return func_name, None, None + print("ERROR: %s's block_node doesn't have position info." % func_name) + return func_name, start_line, None return func_name, start_line, end_line -def handle_name(name_node): +def _handle_function_name(name_node): """Given an node, return its text content and position (line)""" - text, line = None, None + name, line = None, None if name_node is not None: - text = name_node.text - line = int(name_node.attrib[line_attr]) - return text, line + if name_node.text: + name = name_node.text + line = int(name_node.attrib[line_attr]) + else: + line = int(name_node[0].attrib[line_attr]) + name = name_node[2].text + + return name, line -def handle_call(call_node): +def _handle_call(call_node): """Given an node, return function name being called Throws NotFunctionCallException @@ -69,17 +75,23 @@ def handle_call(call_node): def update_graph(ccgraph, ast_list, change_stats): for ast in ast_list: + filename = ast.attrib['filename'] for function in ast.findall('./srcml:function', namespaces=ns): - caller_name, _, _ = handle_function(function) + caller_name, _, _ = _handle_function(function) if not caller_name: continue if caller_name not in ccgraph: - ccgraph.add_node(caller_name) + ccgraph.add_node(caller_name, [filename]) + else: + files: Set[str] = ccgraph.files(caller_name) + if filename not in files: + files.add(filename) + ccgraph.update_node_files(caller_name, files) for call in function.xpath('.//srcml:call', namespaces=ns): try: - callee_name = handle_call(call) + callee_name = _handle_call(call) except NotFunctionCallError: continue except: @@ -87,21 +99,23 @@ def update_graph(ccgraph, ast_list, change_stats): if callee_name not in ccgraph: -
ccgraph.add_node(callee_name) + # Pass [] to files argument since we don't know + # which file this node belongs to + ccgraph.add_node(callee_name, []) ccgraph.add_edge(caller_name, callee_name) - for func_name, change_size in change_stats.items(): - if func_name not in ccgraph: + for func, fstat in change_stats.items(): + if func not in ccgraph: - print("%s in change_stats but not in ccgraph" % func_name) + print("%s in change_stats but not in ccgraph" % func) continue - ccgraph.update_node_history(func_name, change_size) + ccgraph.update_node_history(func, fstat['adds'], fstat['dels']) def get_func_ranges_c(root): func_names, func_ranges = [], [] for func_node in root.findall('./srcml:function', namespaces=ns): - func_name, start_line, end_line = handle_function(func_node) + func_name, start_line, end_line = _handle_function(func_node) if not (func_name and start_line and end_line): continue diff --git a/persper/analytics/commit_classifier.py b/persper/analytics/commit_classifier.py new file mode 100644 index 00000000000..7e3a5cb58f7 --- /dev/null +++ b/persper/analytics/commit_classifier.py @@ -0,0 +1,33 @@ +from abc import ABC, abstractmethod +from git import Commit, DiffIndex + + +class CommitClassifier(ABC): + """ + Defines the interface of any commit classifier + """ + + @abstractmethod + def predict(self, commit: Commit, diff_index: DiffIndex): + """ + Args: + commit: A gitpython's Commit object. + diff_index: A gitpython's DiffIndex object. + It is a list of Diff object, each containing the + diff information between a pair of old/new source files. + + + Returns: + A list, representing the probability distribution of each label + """ + pass + + @property + @abstractmethod + def labels(self): + """ + Returns: + A list of label (str), + in the same order as `predict` method's output.
+ """ + pass diff --git a/persper/analytics/git_tools.py b/persper/analytics/git_tools.py index 716f80e3156..93e1584a427 100644 --- a/persper/analytics/git_tools.py +++ b/persper/analytics/git_tools.py @@ -1,18 +1,26 @@ from git.exc import InvalidGitRepositoryError, NoSuchPathError -from git import Repo +from git import Repo, Commit +from typing import Union import sys +import git EMPTY_TREE_SHA = '4b825dc642cb6eb9a060e54bf8d69288fbee4904' -def _diff_with_first_parent(commit): +def diff_with_first_parent(repo: Repo, commit: Commit): if len(commit.parents) == 0: - prev_commit = EMPTY_TREE_SHA + return diff_with_commit(repo, commit, None) else: - prev_commit = commit.parents[0] - # commit.diff automatically detect renames - return commit.diff(prev_commit, - create_patch=True, R=True, indent_heuristic=True) + return diff_with_commit(repo, commit, commit.parents[0]) + + +def diff_with_commit(repo: Repo, current_commit: Commit, base_commit_sha: str): + if not base_commit_sha: + base_commit = repo.tree(EMPTY_TREE_SHA) + else: + base_commit = repo.commit(base_commit_sha) + return base_commit.diff(current_commit, create_patch=True, indent_heuristic=True, + ignore_blank_lines=True, ignore_space_change=True) def initialize_repo(repo_path): @@ -29,4 +37,6 @@ def initialize_repo(repo_path): def get_contents(repo, commit, path): """Get contents of a path within a specific commit""" - return repo.git.show('{}:{}'.format(commit.hexsha, path)) + if type(commit) == Commit: + commit = commit.hexsha + return repo.git.show('{}:{}'.format(commit, path)) diff --git a/persper/analytics/go.py b/persper/analytics/go.py index 284ab5cf6d0..e4a39fb178d 100644 --- a/persper/analytics/go.py +++ b/persper/analytics/go.py @@ -40,10 +40,7 @@ def update_graph(self, old_filename, old_src, def get_graph(self): graph_url = self.server_addr + '/callgraph' r = requests.get(graph_url) - graph_data = r.json() - graph_data['directed'] = True - graph_data['multigraph'] = False - return 
CallCommitGraph(graph_data) + return CallCommitGraph(graph_data=r.json()) def reset_graph(self): reset_url = urllib.parse.urljoin(self.server_addr, '/reset') diff --git a/persper/analytics/graph_server.py b/persper/analytics/graph_server.py index c760aa09d13..f70f1415df3 100644 --- a/persper/analytics/graph_server.py +++ b/persper/analytics/graph_server.py @@ -1,5 +1,7 @@ from abc import ABC from abc import abstractmethod +from aenum import Enum +from persper.analytics.call_commit_graph import CallCommitGraph JS_FILENAME_REGEXES = [ r'.+\.js$', @@ -25,31 +27,155 @@ r'.+\.go$' ] + +class CommitSeekingMode(Enum): + """ + Describes how `Analyzer` has reached the current commit. + """ + _init_ = "value __doc__" + NormalForward = 0, """ + The current commit has been reached because `Analyzer` is going to analyze this commit. + `GraphServer` implementation should update its working tree according to the subsequent + `update_graph` calls, as well as the call commit graph preserved inside `GraphServer`. + """ + MergeCommit = 1, """ + The current commit has been reached because `Analyzer` is going to analyze this commit. + However, the current commit is a merge commit. Some commit graph traits might be + updated differently from NormalForward case; for example, we still update edges, + but we don't update node history in this commit. + """ + Rewind = 2, """ + The current commit has been reached because `Analyzer` is tracing back (or more generally, "jumping") + to the parent commit (A^) of certain commit (A). Usually there should be no changes to the commit graph + preserved in the GraphServer during going through the diff of this commit. But yet GraphServer should update + its workspace tree accordingly, because the next commit shall be the "certain commit" (A) to be analyzed + either as `NormalForward` or `MergeCommit`. 
+ To ensure GraphServer can correctly obtain the file change information, Analyzer + will go to its parent commit (A^) first + """ + + class GraphServer(ABC): + r""" + Provides implementation-specific ability to build call commit graph via + some or all of the commits in a repository. + + `analyzer2.Analyzer` is the consumer of this class. It will ensure the methods be called + in the following order: + + ``` + lastCommit = EMPTY_TREE_SHA + for commit in commits: + start_commit(commit) + for oldFileName, fileName, fileDiff in compareCommit(lastCommit, commit): + filter_file(oldFileName) + filter_file(fileName) + update_graph(oldFileName, fileName, fileDiff) + end_commit(commit) + lastCommit = commit + ``` + + `Analyzer` will visit a range of commits in certain order (though it's usually + topology order because we may reduce as more Rewinds as possible), and it will + indicate how it has reached the current commit in `start_commit`. Because the + commit history is not linear, we may sometimes need to move backwards (rewind) + in the commit tree. In that case, we need to ensure `GraphServer` can always + get the correct file diff from __parent__ commit. For example, in a commit tree + like this + ``` + A -- B -- C -- D -- E -- F + \ -- a -- b -- / + ``` + where `master` == `F`, `Analyzer` may visit the commits in the following order + * A (NormalForward) + * B (NormalForward) + * C (NormalForward) + * D (NormalForward) + * B (Rewind) + * a (NormalForward) + * b (NormalForward) + * E (MergeCommit) + * F (NormalForward) + + When implementing this class, you should ensure to + * Update both the edge (call relation) and node history for `NormalForward` commits + * Update edge but keep node history untouched for `MergeCommit` commits + * Do not update call commit graph for `Rewind` commits + + The existence of `Rewind` commits is only to simplify the node history generation + for `GraphServer`, because node history means the diff to the __parent__ commit. 
+ + If there is any Error raised in any of the implementation methods, the status of + `GraphServer` will be unspecified. It's suggested the consumer of `GraphServer` + create a new instance of it. + + When overriding this class, some of the methods may be implemented either as + synchronous or asynchronous (with `asyncio`, or `async def`). You will find the + note on the methods respectively. + """ - @abstractmethod def register_commit(self, hexsha, author_name, author_email, commit_message): """ + Deprecated. Do not override this method. + Override start_commit instead. :return: a status code, success or failure """ + raise NotImplementedError() @abstractmethod def update_graph(self, old_filename: str, old_src: str, new_filename: str, new_src: str, patch: bytes): """ - Update the graph with a single-file patch - :param old_filename: the path to a file that the commit modifies - :param old_src: the source code of the file before the commit - :param new_filename: the path to the file after the commit - :param new_src: the source code of the file after the commit - :param patch: the raw patch generated by GitPython diff - :return a status code + Notifies `GraphServer` a file has been changed in this commit. + params + :param old_filename: the path to a file that the commit modifies + :param old_src: the source code of the file before the commit + :param new_filename: the path to the file after the commit + :param new_src: the source code of the file after the commit + :param patch: the raw patch generated by GitPython diff + + remarks + This method can be implemented as async method. + The name of this function is kept for backward-compatibility. + It's up to implementation to decide whether to update the + call commit graph on the fly, or to only make necessary work + tree modifications in this method, and update the call commit + graph in whole in `end_commit` method.
You may also choose to + update some part of the graph in `update_graph`, and the rest + in `end_commit`. + """ + pass + + def start_commit(self, hexsha: str, seeking_mode: CommitSeekingMode, author_name: str, + author_email: str, commit_message: str): + """ + Called when the `Analyzer` has reached a new commit. + params + hexsha hex SHA of the commit. + seeking_mode describes how this commit has been reached. + See CommitSeekingMode for more details on the meaning of each value. + commit_message commit summary. + + remarks + When implementing this method, you might want to preserve `seeking_mode` as a class field + so you may have access to this value in `update_graph` & `end_commit` implementations. + """ + # default implementation for backwards compatibility + if seeking_mode == CommitSeekingMode.NormalForward: + self.register_commit(hexsha, author_name, author_email, commit_message) + + def end_commit(self, hexsha: str): + """ + Called when the `Analyzer` is going to leave this commit. + + remarks + This method can be implemented as async method. """ pass @abstractmethod - def get_graph(self): + def get_graph(self) -> CallCommitGraph: """ Retrieve the graph :return: A CallCommitGraph object diff --git a/persper/analytics/lsp_graph_server/README.md b/persper/analytics/lsp_graph_server/README.md new file mode 100644 index 00000000000..df961faf602 --- /dev/null +++ b/persper/analytics/lsp_graph_server/README.md @@ -0,0 +1,29 @@ +# lsp_graph_server + +To try out the graph server backed by LSP, especially the [ccls](https://github.com/MaskRay/ccls)-based one, you need +* Compile [MaskRay/ccls](https://github.com/MaskRay/ccls), the customized ccls fork for graph server. + +* Place the compiled binary under `/bin` folder of the repository root, i.e. `/bin/ccls` or `/bin/ccls.exe`. + +## Work with notebook + +* In the repository root, run `pipenv run ./tools/repo_creater/create_repo.py test/cpp_test_repo/` to create a cpp test repo. 
+ +* `jupyter notebook`, then open `notebooks/lsp-ccls.ipynb` + +* Execute all the cells + +## Work with unit tests + +* Open a shell under `/test/test_analytics`, run + + ```powershell + # run all of the tests + pipenv run pytest test_analyzer_lsp_ccls.py + # or run a single test + pipenv run pytest test_analyzer_lsp_ccls.py::testFeatureBranch + ``` + + * The test results are compared against baseline (by commit) in `/test/test_analytics/baseline`. + + * If there are assertion errors during testing, you can see the actual run result in `/test/test_analytics/actualdump`. \ No newline at end of file diff --git a/persper/analytics/lsp_graph_server/__init__.py b/persper/analytics/lsp_graph_server/__init__.py new file mode 100644 index 00000000000..0545b419a66 --- /dev/null +++ b/persper/analytics/lsp_graph_server/__init__.py @@ -0,0 +1,304 @@ +import asyncio +import logging +import os +import subprocess +from abc import abstractclassmethod, abstractproperty +from datetime import datetime, timedelta +from os import path +from pathlib import Path, PurePath +from typing import Dict, List, Tuple, Union + +from persper.analytics.call_commit_graph import CallCommitGraph +from persper.analytics.graph_server import GraphServer, CommitSeekingMode +from persper.analytics.another_patch_parser import parseUnifiedDiff + +from .callgraph import CallGraphScope +from .callgraph.adapters import CallCommitGraphSynchronizer +from .callgraph.builder import CallGraphBuilder, TokenizedDocument +from .callgraph.manager import CallGraphManager +from .languageclient.lspclient import LspClient + +_logger = logging.getLogger(__name__) + + +class LspClientGraphServer(GraphServer): + """ + The common base class for LSP-client backed-up call graph server. + + The derived class of this class should be used with `async with` statement: + ``` + async with LspClientGraphServer(..) as graphServer: + ... 
+ ``` + """ + defaultLanguageServerCommand: Union[str, List[str]] = None + defaultLoggedLanguageServerCommand: Union[str, List[str]] = None + + def __init__(self, workspaceRoot: str, + languageServerCommand: Union[str, List[str]] = None, + dumpLogs: bool = False, + graph: CallCommitGraph = None): + """ + workspaceRoot: root of the temporary workspace path. LSP workspace and intermediate repository files + will be placed in this folder. + + languageServerCommand: the command line (in string, or a sequence of parameters) for starting the + language server process. If use `null` or default value, + the value of current class's `defaultLanguageServerCommand` static field will be used. + """ + self._ccgraph = graph or CallCommitGraph() + self._callGraph = CallCommitGraphSynchronizer(self._ccgraph) + self._workspaceRoot: Path = Path(workspaceRoot).resolve() + self._invalidatedFiles = set() + if not self._workspaceRoot.exists(): + self._workspaceRoot.touch() + if languageServerCommand: + self._languageServerCommand = languageServerCommand + elif dumpLogs and type(self).defaultLoggedLanguageServerCommand: + self._languageServerCommand = type(self).defaultLoggedLanguageServerCommand + else: + self._languageServerCommand = type(self).defaultLanguageServerCommand + self._lspServerProc: subprocess.Popen = None + self._lspClient: LspClient = None + self._callGraphBuilder: CallGraphBuilder = None + self._callGraphManager: CallGraphManager = None + self._lastFileWrittenTime: datetime = None + self._dumpLogs = dumpLogs + # [(oldPath, newPath, addedLines, removedLines), ...] + # added/removedLines := [[startLine, modifiedLines], ...] 
+ self._stashedPatches: List[Tuple[PurePath, PurePath, List[Tuple[int, int]], List[Tuple[int, int]]]] = [] + self._symbolPaths = dict() + self._commitSeekingMode: CommitSeekingMode = None + + def __getstate__(self): + state = self.__dict__.copy() + state.pop("_lspServerProc", None) + state.pop("_lspClient", None) + state.pop("_callGraphBuilder", None) + state.pop("_callGraphManager", None) + return state + + def __setstate__(self, state): + self.__dict__.update(state) + if not self._workspaceRoot.exists(): + self._workspaceRoot.touch() + + def start_commit(self, hexsha: str, seeking_mode: CommitSeekingMode, author_name: str, + author_email: str, commit_message: str): + _logger.info("Start commit: %s %s (%s)", hexsha, commit_message[:32].strip(), seeking_mode) + self._commitSeekingMode = seeking_mode + if seeking_mode != CommitSeekingMode.Rewind: + self._ccgraph.add_commit(hexsha, author_name, author_email, commit_message) + self._symbolPaths.clear() + + async def update_graph(self, old_filename: str, old_src: str, new_filename: str, new_src: str, patch: bytes): + oldPath = self._workspaceRoot.joinpath(old_filename).resolve() if old_filename else None + newPath = self._workspaceRoot.joinpath(new_filename).resolve() if new_filename else None + assert oldPath or newPath + + # update node history + if self._commitSeekingMode == CommitSeekingMode.NormalForward: + if newPath is None: + # The file has been deleted + # We need to scan it before it's gone, instead of in end_commit + self._markWholeDocumentAsChanged(await self._callGraphBuilder.getTokenizedDocument(oldPath), True) + elif oldPath is None: + # The file has been added + self._stashedPatches.append((oldPath, newPath, None, None)) + else: + added, removed = parseUnifiedDiff(patch.decode('utf-8', 'replace')) + # calculate removed lines + if removed: + # we can have removed lines only when we have old file + oldDoc: TokenizedDocument = await self._callGraphBuilder.getTokenizedDocument(oldPath) + # start, end are 
inclusive, 1-based + for start, end in removed: + for i in range(start - 1, end): + # print("Removed L", i + 1, list((s.name, s.startPos, s.endPos) for s in oldDoc.scopesOnLine(i))) + for scope in oldDoc.scopesOnLine(i): + self._safeUpdateNodeHistory(scope, 0, 1) + self._stashedPatches.append((oldPath, newPath, added, None)) + + # perform file operations + if oldPath and oldPath != newPath: + # The file has been moved/deleted + await self._callGraphBuilder.deleteFile(oldPath) + self._invalidatedFiles.add(oldPath) + if newPath: + # The file has been created/modified + await self._callGraphBuilder.modifyFile(newPath, new_src) + self._invalidatedFiles.add(newPath) + self._lastFileWrittenTime = datetime.now() + + def _safeUpdateNodeHistory(self, scope: CallGraphScope, addedLines: int, removedLines: int): + if scope.name not in self._ccgraph.nodes(): + self._ccgraph.add_node(scope.name) + self._ccgraph.update_node_history(scope.name, addedLines, removedLines) + + def _markWholeDocumentAsChanged(self, doc: TokenizedDocument, markAsRemoved: bool): + # markAsRemoved: True: document has been deleted + # False: document has been added + parentScopes = [] + # print("_markWholeDocumentAsChanged: ", doc.fileName) + for scope in doc.scopes: + while parentScopes and parentScopes[-1][0].endPos <= scope.startPos: + # scope is out of parentScope, then the changed line count for parentScope is decided + s, c = parentScopes.pop() + self._safeUpdateNodeHistory(s, c, 0) + thisScopeLines = scope.endPos.line - scope.startPos.line + 1 + if parentScopes: + # Subtract LOC from innermost scope to eliminate dups + innermostScope = parentScopes[-1] + s, c = innermostScope + assert s.startPos <= scope.startPos and s.endPos >= scope.endPos, \ + "`scope` should be inside parent scope: {0}. 
parentScopes: {1}".format(s, parentScopes) + c -= thisScopeLines + # If there are more than 1 scope on the same line, + # we will count in 1 line for each scope + if s.startPos.line == scope.startPos.line: + c += 1 + if s.startPos.line < s.endPos.line == scope.endPos.line: + c += 1 + assert c >= 0, \ + "parentScope's LOC change is negative: {0}. parentScopes: {1}".format(s, parentScopes) + innermostScope[1] = c + parentScopes.append([scope, thisScopeLines]) + while parentScopes: + s, c = parentScopes.pop() + if markAsRemoved: + self._safeUpdateNodeHistory(s, 0, c) + else: + self._safeUpdateNodeHistory(s, c, 0) + + async def end_commit(self, hexsha): + # update vetices & edges + if self._commitSeekingMode != CommitSeekingMode.Rewind: + await self.updateGraph() + + # calculate added lines + if self._commitSeekingMode == CommitSeekingMode.NormalForward: + for oldPath, newPath, added, _ in self._stashedPatches: + if not newPath: + continue + if oldPath and not added: + continue + newDoc: TokenizedDocument = await self._callGraphBuilder.getTokenizedDocument(newPath) + if not oldPath: + # file has been added + self._markWholeDocumentAsChanged(newDoc, False) + else: + assert added + for start, end in added: + for i in range(start - 1, end): + # print("Added L", i + 1, list((s.name, s.startPos, s.endPos) for s in newDoc.scopesOnLine(i))) + for scope in newDoc.scopesOnLine(i): + self._safeUpdateNodeHistory(scope, 1, 0) + + # update node files + for nodeName, nodeFiles in self._symbolPaths.items(): + self._ccgraph.update_node_files(nodeName, [str(f.relative_to(self._workspaceRoot)).replace("\\", "/") for f in nodeFiles]) + + self._stashedPatches.clear() + + # ensure the files in the next commit has a different timestamp from this commit. 
+ + if datetime.now() - self._lastFileWrittenTime < timedelta(seconds=1): + await asyncio.sleep(1) + + def get_graph(self): + return self._ccgraph + + def reset_graph(self): + self._callGraph.clear() + + def filter_file(self, filename): + filePath = self._workspaceRoot.joinpath(filename).resolve() + # _logger.info("Filter: %s -> %s", filePath, self._callGraphBuilder.filterFile(str(filePath))) + return self._callGraphBuilder.filterFile(str(filePath)) + + def config(self, param: dict): + pass + + async def __aenter__(self): + await self.startLspClient() + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + await self.stopLspClient() + + async def startLspClient(self): + """ + When overridden in derived class, starts the LSP server process, + and sets the following fields properly: + * self._lspServerProc + * self._lspClient + * self._callGraphBuilder + * self._callGraphManager + """ + if os.name == "nt": + self._lspServerProc = subprocess.Popen( + self._languageServerCommand, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + creationflags=subprocess.CREATE_NEW_CONSOLE) + else: + self._lspServerProc = subprocess.Popen( + self._languageServerCommand, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + shell=True) + + async def stopLspClient(self): + """ + Performs LSP client stop sequence. + This method is usually invoked in `__aexit__` so you do not have to call it manually + if you are using this class instance with `async with` statement. 
+ """ + if not self._lspServerProc: + return + _logger.info("Shutting down language server...") + await asyncio.wait_for(self._lspClient.server.shutdown(), 10) + self._lspClient.server.exit() + try: + exitCode = self._lspServerProc.wait(10) + _logger.info("Language server %d exited with code: %s.", self._lspServerProc.pid, exitCode) + except subprocess.TimeoutExpired: + self._lspServerProc.kill() + _logger.warning("Killed language server %d.", self._lspServerProc.pid) + self._lspServerProc = None + self._callGraphBuilder = None + self._callGraphManager = None + + def invalidateFile(self, path: Union[str, Path]): + """ + Mark the call graph for the specified file as invalidated, so it should be re-generated in + the next `updateGraph` call. + """ + if isinstance(path, str): + path = Path(path).resolve() + self._invalidatedFiles.add(path) + + async def updateGraph(self): + if not self._invalidatedFiles: + return + affectedFiles = self._callGraphManager.removeByFiles(self._invalidatedFiles) + _logger.info("Invalidated %d files, affected %d files.", len(self._invalidatedFiles), len(affectedFiles)) + await self._callGraphBuilder.waitForFileSystem() + # update vertices + # Use scope full name as identifier. 
+ for path in affectedFiles: + path: Path + if not path.exists(): + continue + for scope in await self._callGraphBuilder.enumScopesInFile(str(path)): + scope: CallGraphScope + if scope.name not in self._ccgraph.nodes().data(): + self._ccgraph.add_node(scope.name) + symbolPaths = self._symbolPaths.get(scope.name, None) + if not symbolPaths: + symbolPaths = set() + self._symbolPaths[scope.name] = symbolPaths + symbolPaths.add(scope.file) + # update edges + await self._callGraphManager.buildGraph(fileNames=affectedFiles) + self._invalidatedFiles.clear() diff --git a/persper/analytics/lsp_graph_server/callgraph/__init__.py b/persper/analytics/lsp_graph_server/callgraph/__init__.py new file mode 100644 index 00000000000..35dec6664d6 --- /dev/null +++ b/persper/analytics/lsp_graph_server/callgraph/__init__.py @@ -0,0 +1,96 @@ +""" +Basic data structures for call graph. +""" +import logging +from io import IOBase +from pathlib import Path, PurePath +from typing import Dict, Iterable, List, NamedTuple, Tuple, Type, Union + +from persper.analytics.lsp_graph_server.languageclient.lspcontract import \ + DocumentSymbol, Location, Position, SymbolInformation, SymbolKind, \ + TextDocument, TextDocumentContentChangeEvent + + +_logger = logging.getLogger(__name__) + + +class CallGraphNode(NamedTuple): + name: str + kind: SymbolKind + file: PurePath + pos: Position + length: int + + def __eq__(self, other): + if not isinstance(other, CallGraphNode): + return False + return self.name == other.name and self.file == other.file and self.pos == other.pos and self.length == other.length + + def __hash__(self): + return hash((self.name, self.kind, self.file, self.pos, self.length)) + + +class CallGraphScope(NamedTuple): + name: str + kind: SymbolKind + file: PurePath + startPos: Position + endPos: Position + + def __eq__(self, other): + if not isinstance(other, CallGraphScope): + return False + return self.name == other.name and self.file == other.file and self.startPos == other.startPos 
\ + and self.endPos == other.endPos + + def __hash__(self): + return hash((self.name, self.kind, self.file, self.startPos, self.endPos)) + + class CallGraphBranch(NamedTuple): + sourceScope: CallGraphScope + definitionScope: CallGraphScope + sourceToken: CallGraphNode + definitionToken: CallGraphNode + + def __eq__(self, other): + if not isinstance(other, CallGraphBranch): + return False + return self.sourceScope == other.sourceScope and self.definitionScope == other.definitionScope \ + and self.sourceToken == other.sourceToken and self.definitionToken == other.definitionToken + + class CallGraph(): + + def __init__(self): + self._items = [] + + @property + def items(self): + return self._items + + def add(self, branch: CallGraphBranch): + if not branch.sourceScope: + raise ValueError("branch.sourceScope should not be None.") + if not branch.definitionScope: + raise ValueError("branch.definitionScope should not be None.") + self._items.append(branch) + + def clear(self): + self._items.clear() + + def removeBySourceFiles(self, fileNames: Iterable[PurePath]): + if not isinstance(fileNames, set): + fileNames = set(fileNames) + newItems = [i for i in self._items if i.sourceScope.file not in fileNames] + _logger.info("Removed %d branches by %d files.", len(self._items) - len(newItems), len(fileNames)) + self._items = newItems + + def dump(self, file: IOBase): + for i in self._items: + file.write(str(i)) + file.write("\n") + + def dumpTo(self, fileName: str): + with open(fileName, "wt") as f: + self.dump(f) diff --git a/persper/analytics/lsp_graph_server/callgraph/adapters.py b/persper/analytics/lsp_graph_server/callgraph/adapters.py new file mode 100644 index 00000000000..b0716c07206 --- /dev/null +++ b/persper/analytics/lsp_graph_server/callgraph/adapters.py @@ -0,0 +1,31 @@ +import logging +from pathlib import Path, PurePath +from typing import Iterable + +from persper.analytics.call_commit_graph import CallCommitGraph + +from .
 import CallGraph, CallGraphBranch +_logger = logging.getLogger(__name__) + + class CallCommitGraphSynchronizer(CallGraph): + def __init__(self, callCommitGraph: CallCommitGraph): + super().__init__() + self._callCommitGraph = callCommitGraph + + def add(self, branch: CallGraphBranch): + if branch.sourceScope == branch.definitionScope: + # e.g. variable references. + return + if branch.sourceScope is None or branch.definitionScope is None: + _logger.debug("Ignored branch with None scope: %s", branch) + return + # assuming the referenced edges have already been registered, + # or there will be Error + self._callCommitGraph.add_edge(branch.sourceScope.name, branch.definitionScope.name) + + def removeBySourceFiles(self, fileNames: Iterable[PurePath]): + pass + + def clear(self): + self._callCommitGraph.reset() diff --git a/persper/analytics/lsp_graph_server/callgraph/builder.py b/persper/analytics/lsp_graph_server/callgraph/builder.py new file mode 100644 index 00000000000..37923c2aeb3 --- /dev/null +++ b/persper/analytics/lsp_graph_server/callgraph/builder.py @@ -0,0 +1,479 @@ +import asyncio +import logging +import os +import re +import urllib.parse +from abc import ABC, abstractclassmethod +from glob import iglob +from os import path +from pathlib import Path, PurePath +from typing import Dict, Iterable, List, Type, Union + +from antlr4 import FileStream, Lexer, Token +from antlr4.error.ErrorListener import ErrorListener +from jsonrpc.exceptions import JsonRpcException + +from persper.analytics.lsp_graph_server import wildcards +from persper.analytics.lsp_graph_server.languageclient.lspclient import LspClient +from persper.analytics.lsp_graph_server.languageclient.lspcontract import \ + DocumentSymbol, Location, Position, SymbolInformation, SymbolKind, \ + TextDocument, TextDocumentContentChangeEvent, FileEvent, FileChangeType +from .
import CallGraphBranch, CallGraphNode, CallGraphScope + +_logger = logging.getLogger(__name__) + +_KNOWN_EXTENSION_LANGUAGES = { + ".h": "cpp", + ".cpp": "cpp", + ".hpp": "cpp", + ".cc": "cpp", + ".c": "c" +} + + +class TokenizedDocument: + """ + Represents a fully tokenized document that supports finding a symbol or scope from + the specified document position. + """ + + def __init__(self, tokens: Iterable[Token], + documentSymbols: Iterable[Union[DocumentSymbol, SymbolInformation]], + fileName: PurePath, + documentSymbolFilter): + self._tokens = [] + self._scopes = [] + self._fileName = fileName + # cquery returns SymbolInformation, which does not contain the exact position of the defined symbol. + # We just assume symbol is at the first line of the container + # DocumentSymbol + # { (symbolLine, symbolColumn): symbolKind } + # SymbolInformation + # { (symbolLine, symbolName): (containerColumn, symbolKind) } + symbolKinds = {} + + def PopulateSymbols(symbols): + for s in symbols: + filterResult = documentSymbolFilter(s) + if filterResult == None: + continue + if filterResult == False: + if isinstance(s, DocumentSymbol): + PopulateSymbols(s.children) + continue + if isinstance(s, DocumentSymbol): + # We assume selectionRange is exactly the range of symbol name + symbolKinds[s.selectionRange.start.toTuple()] = s.kind + self._scopes.append(CallGraphScope(s.detail or s.name, s.kind, + fileName, s.range.start, s.range.end)) + if s.children: + PopulateSymbols(s.children) + elif isinstance(s, SymbolInformation): + symbolKinds[(s.location.range.start.line, s.name)] = (s.location.range.start.character, s.kind) + self._scopes.append(CallGraphScope(s.containerName, s.kind, fileName, + s.location.range.start, s.location.range.end)) + else: + _logger.error("Invalid DocumentSymbol in %s: %s", fileName, s) + + PopulateSymbols(documentSymbols) + # put the scopes in document order of start positions, then by the reversed document order of their end positions + # so that we can 
find the smallest scope by one traverse along the scope list. + self._scopes.sort(key=lambda sc: (sc.startPos.toTuple(), (-sc.endPos.line, -sc.endPos.character))) + NOT_EXISTS = object() + for t in tokens: + t: Token + assert t.line >= 1 + assert t.column >= 0 + line, col = t.line - 1, t.column + kind = symbolKinds.pop((line, col), NOT_EXISTS) + if kind is NOT_EXISTS: + kind = symbolKinds.get((line, t.text)) + if kind: + containerCol, kind = kind + if containerCol <= col: + # Symbol must be in the container + # e.g. + # |container |symbol |container + # |starts here |starts here |ends here + # v v v + # int main() { ... } + del symbolKinds[(line, t.text)] + else: + kind = None + self._tokens.append(CallGraphNode(t.text, kind, fileName, Position(line, col), t.stop - t.start + 1)) + + @property + def tokens(self): + return self._tokens + + @property + def scopes(self): + return self._scopes + + @property + def fileName(self): + return self._fileName + + def tokenAt(self, line: int, character: int) -> CallGraphNode: + """ + Gets the CallGraphNode from the specified 0-base line and character position + in the document. + """ + L = 0 + R = len(self._tokens) - 1 + pos = Position(line, character) + while L <= R: + M = (L+R)//2 + tokenM: CallGraphNode = self._tokens[M] + # assume there is no \n in token content + endPos = Position(tokenM.pos.line, tokenM.pos.character + tokenM.length) + if endPos <= pos: + L = M + 1 + elif tokenM.pos > pos: + R = M - 1 + else: + return tokenM + return None + + def scopesOnLine(self, line: int) -> List[CallGraphScope]: + """ + Gets all the instances of CallGraphScope from the specified 0-base line number. 
+ """ + scopes = [] + for scope in self._scopes: + # This is inefficient (yet correct) + if scope.startPos.line > line: + break + if scope.endPos.line >= line: + scopes.append(scope) + return scopes + + def scopeAt(self, line: int, character: int) -> CallGraphScope: + """ + Gets the CallGraphScope from the specified 0-base line and character position + in the document. + """ + L = 0 + R = len(self._scopes) - 1 + MatchingM = None + pos = Position(line, character) + lastScope = None + # Find the smallest container scope, assume the scopes do not intersect with each other + # (either contains or not contains one another) + for scope in self._scopes: + # This is inefficient (yet correct) + if scope.startPos > pos: + break + if pos < scope.endPos: + assert lastScope is None or lastScope.startPos <= scope.startPos <= lastScope.endPos + lastScope = scope + return lastScope + + +class CallGraphBuilder(ABC): + """ + Building call graph branches from the given files with the specific Lexer and LspClient. + """ + + def __init__(self, lspClient: LspClient): + if not isinstance(lspClient, LspClient): + raise TypeError("lspClient should be an instance of LspClient.") + # status + self._lspClient = lspClient + self._tokenizedDocCache: Dict[str, TokenizedDocument] = {} + self._workspaceFilePatterns: List[str] = None + self._workspaceFilePatternsRegex: list[re.Pattern] = None + self._deletePendingPaths = [] + + @property + def lspClient(self): + return self._lspClient + + # @lspClient.setter + # def lspClient(self, value: LspClient): + # if not isinstance(value, LspClient): + # raise TypeError("lspClient should be an instance of LspClient.") + # self._lspClient = value + + @property + def workspaceFilePatterns(self) -> List[str]: + """ + A list of `str` containing the glob pattern of workspace files. + When performing goto defintion operations, symbols defined ouside the workspace files + will not be counted in as call graph branch. 
+ """ + return self._workspaceFilePatterns + + @workspaceFilePatterns.setter + def workspaceFilePatterns(self, value: List[str]): + self._workspaceFilePatterns = value + if value: + self._workspaceFilePatternsRegex = [re.compile(wildcards.translate(p)) for p in value] + else: + self._workspaceFilePatternsRegex = None + + def removeDocumentCache(self, path: Union[str, PurePath]): + """ + Remove the lexer cache of a specified document by path. + + path: either be a `str` or a fully resolved `Path` instance. + In the former case, the given path string will be resolved automatically. + """ + if isinstance(path, str): + path = Path(path).resolve() + try: + del self._tokenizedDocCache[path] + except KeyError: + pass + + async def getTokenizedDocument(self, path: Union[str, PurePath]): + class MyLexerErrorListener(ErrorListener): + def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e): + _logger.warning("%s:%d,%d: %s", path, line, column, msg) + + if isinstance(path, str): + path = Path(path).resolve() + doc = self._tokenizedDocCache.get(path) + if doc: + return doc + textDoc = TextDocument.loadFile(path, self.inferLanguageId(path)) + input = FileStream(path, encoding="utf-8", errors="replace") + lexer = self.createLexer(input) + assert isinstance(lexer, Lexer) + lexer.removeErrorListeners() + lexer.addErrorListener(MyLexerErrorListener()) + documentSymbols = [] + if await self.openDocument(textDoc): + try: + documentSymbols = await self._lspClient.server.textDocumentGetSymbols(textDoc.uri) + finally: + # _logger.info("Close doc") + self._lspClient.server.textDocumentDidClose(textDoc.uri) + + def tokenGenerator(): + while True: + tk = lexer.nextToken() + if tk.type == Token.EOF: + return + if self.filterToken(tk): + yield tk + doc = TokenizedDocument(tokenGenerator(), documentSymbols, path, + documentSymbolFilter=lambda s: self.filterSymbol(s)) + self._tokenizedDocCache[path] = doc + return doc + + def pathFromUri(self, expr: str) -> Path: + expr: str = 
urllib.parse.unquote(expr).strip() + if expr[:7].lower() == "file://": + if expr[7:8] == "/": # Local file + if expr[9:10] == ":": # Windows drive e.g. C: + expr = expr[8:] # Remove all the leading slashes + else: # Linux path + expr = expr[7:] + else: # UNC address + expr = expr[5:] + else: + expr = urllib.parse.unquote(expr) + return Path(expr).resolve() + + @abstractclassmethod + def filterToken(self, token: Token) -> bool: + """ + When overridden in the derived class, determines whether the given token + has the need to perform goto definition LSP invocations on. + """ + raise NotImplementedError + + def filterSymbol(self, symbol: Union[DocumentSymbol, SymbolInformation]) -> bool: + """ + When overridden in the derived class, determines whether the given symbol + should be treated as a target of goto definition / scope / call graph vertex. + Returns + True symbol should be included and its children, if available, will pass filterSymbol + False symbol should be excluded, while its children will pass filterSymbol + None symbol and its children will be excluded + """ + if symbol.kind in { + SymbolKind.Parameter, + SymbolKind.TypeParameter + }: + return None + return symbol.kind in { + SymbolKind.Constructor, + SymbolKind.Enum, + SymbolKind.Function, + SymbolKind.Macro, + SymbolKind.Method, + SymbolKind.Operator, + SymbolKind.Property, + SymbolKind.StaticMethod, + } + + def filterFile(self, fileName: str): + if self._workspaceFilePatternsRegex: + return any(p.match(str(fileName)) for p in self._workspaceFilePatternsRegex) + return True + + def inferLanguageId(self, path: PurePath) -> str: + """ + Infers the language ID for the given document path. + """ + ext = path.suffix.lower() + return _KNOWN_EXTENSION_LANGUAGES[ext] + + @abstractclassmethod + def createLexer(self, fileStream: FileStream) -> Lexer: + raise NotImplementedError + + async def openDocument(self, textDoc: TextDocument): + """ + Opens the specified text document, notifying the LSP server. 
+ """ + self._lspClient.server.textDocumentDidOpen(textDoc) + + async def closeDocument(self, uri: str): + """ + Closes the specified text document, notifying the LSP server. + + uri: URI of the text document. + """ + self._lspClient.server.textDocumentDidClose(uri) + + async def buildCallGraphInFiles(self, globPattern: Union[str, Iterable[str]] = None): + """ + Build call graph branches asynchronously in files matching the specified glob pattern(s). + """ + if not globPattern: + if not self._workspaceFilePatterns: + raise ValueError("globPattern is required if workspaceFilePatterns is not available.") + globPattern = self._workspaceFilePatterns[0] + if isinstance(globPattern, str): + globPattern = [globPattern] + visitedPaths = set() + for pattern in globPattern: + for fileName in iglob(pattern, recursive=True): + if not path.isfile(fileName): + continue + if fileName in visitedPaths: + continue + visitedPaths.add(fileName) + async for node in self.buildCallGraphInFile(fileName): + yield node + + async def buildCallGraphInFile(self, fileName: str) -> Iterable[CallGraphBranch]: + """ + Build call graph branches asynchronously in the specified file. + """ + srcPath = self.pathFromUri(fileName) + _logger.info("Build call graph in: %s", srcPath) + counter = 0 + thisDoc = await self.getTokenizedDocument(srcPath) + textDoc = TextDocument.loadFile(srcPath, self.inferLanguageId(srcPath)) + if not await self.openDocument(textDoc): + return + try: + for node in thisDoc.tokens: + # Do not waste time on this + if node.kind == SymbolKind.Namespace: + continue + # Put the cursor to the middle. 
+ line, col = node.pos.line, node.pos.character + node.length//2 + _logger.debug(node) + task = self._lspClient.server.textDocumentGotoDefinition(textDoc.uri, (line, col)) + nodeScope = thisDoc.scopeAt(line, col) + defs = await task + defNodes = [] + for d in defs: + d: Location + defPath = self.pathFromUri(d.uri) + if not self.filterFile(defPath): + continue + defsDoc = None + defsDoc = await self.getTokenizedDocument(defPath) + defNode = defsDoc.tokenAt(d.range.start.line, d.range.start.character) + defScope = defsDoc.scopeAt(d.range.start.line, d.range.start.character) + if not defNode: + # Failed to retrieve a node from the given position. + _logger.warning("Failed to retrieve node from %s:%s.", defPath, d.range) + defNode = CallGraphNode(None, None, defPath, d.range.start, None) + if defNode == node: + # This node itself is a definition. Do not waste time on this. + defNodes = None + break + if defNode.kind == SymbolKind.Namespace: + # Find some namespace. Do not waste time on this. + defNodes = None + break + defNodes.append((defNode, defScope)) + if defNodes: + for dn, ds in defNodes: + counter += 1 + yield CallGraphBranch(nodeScope, ds, node, dn) + finally: + await self.closeDocument(textDoc.uri) + _logger.info("Yielded %d branches.", counter) + + async def enumScopesInFile(self, fileName: str) -> Iterable[CallGraphScope]: + """ + Enumerate all the "scope"s in the specified file. + Scopes are vertices of the call graph. 
+ """ + srcPath = self.pathFromUri(fileName) + thisDoc: TokenizedDocument = await self.getTokenizedDocument(srcPath) + return thisDoc.scopes + + async def deleteFile(self, fileName: str): + path = Path(fileName).resolve() + self.removeDocumentCache(path) + if not path.exists: + return False + await self.waitForFileSystem(relaxed=True) + await self.deleteFileCore(path) + self._deletePendingPaths.append(path) + return True + + async def deleteFileCore(self, filePath: Path): + doc = TextDocument(TextDocument.fileNameToUri(str(filePath)), self.inferLanguageId(filePath), 1, "") + self._lspClient.server.textDocumentDidOpen(doc) + # Empty the file and notify language server. + self._lspClient.server.textDocumentDidChange(doc.uri, 2, [TextDocumentContentChangeEvent("")]) + filePath.unlink() + self._lspClient.server.textDocumentDidSave(doc.uri) + await self.closeDocument(doc.uri) + + async def waitForFileSystem(self, relaxed: bool = False): + if not relaxed and len(self._deletePendingPaths) > 0 or len(self._deletePendingPaths) > 100: + for p in self._deletePendingPaths: + p: Path + if p.exists(): + await asyncio.sleep(0.1) + else: + _logger.info("Confirm deleted: %s", p) + self._lspClient.server.workspaceDidChangeWatchedFiles( + [FileEvent(TextDocument.fileNameToUri(p), FileChangeType.Deleted) for p in self._deletePendingPaths]) + self._deletePendingPaths.clear() + + async def modifyFile(self, fileName: str, newContent: str): + """ + Modify a file's content, notifying the language server, as if the file + is modified in the editor. 
+ """ + if newContent is None: + newContent = "" + path = Path(fileName).resolve() + self.removeDocumentCache(path) + try: + await self.modifyFileCore(path, newContent) + except Exception as ex: + raise Exception("Cannot modify {0}.".format(path)) from ex + + async def modifyFileCore(self, filePath: Path, newContent: str): + os.makedirs(str(filePath.parent), exist_ok=True) + prevFileExists = filePath.exists() + with open(str(filePath), "wt", encoding="utf-8", errors="replace") as f: + f.write(newContent) + uri = TextDocument.fileNameToUri(filePath) + self._lspClient.server.workspaceDidChangeWatchedFiles( + [FileEvent(uri, + FileChangeType.Changed if prevFileExists else FileChangeType.Created)]) + _logger.info("Modified %s.", filePath) diff --git a/persper/analytics/lsp_graph_server/callgraph/manager.py b/persper/analytics/lsp_graph_server/callgraph/manager.py new file mode 100644 index 00000000000..85b6cd6d00b --- /dev/null +++ b/persper/analytics/lsp_graph_server/callgraph/manager.py @@ -0,0 +1,92 @@ +""" +Contains CallGraphManager. +""" +import logging +from pathlib import Path, PurePath +from typing import Iterable, NamedTuple, Tuple, Union +from os import path + +from . import CallGraph +from .builder import CallGraphBuilder + +_logger = logging.getLogger(__name__) + + +class CallGraphManager(): + """ + Used to construct / update call graph independently of specific implementations of + CallGraphBuilder. + """ + + def __init__(self, builder: CallGraphBuilder, callGraph: CallGraph = None): + if not isinstance(builder, CallGraphBuilder): + raise TypeError("builderType should be a subtype of CallGraphBuilder.") + self._builder = builder + self._graph = callGraph or CallGraph() + # self._rebuildCounter = 0 + + @property + def graph(self): + """ + Gets the underlying CallGraph. 
+ """ + return self._graph + + async def buildGraph(self, fileNames: Union[str, Iterable[str]] = None, globPattern: Union[str, Iterable[str]] = None): + """ + Build call graph branches from the specified files. + + globPattern: `str` or `str[]` containing the glob pattern of the files + from which to build the call graph branches. + """ + branchCounter = 0 # with dups + fileCounter = 0 + await self._builder.waitForFileSystem() + + def pushBranch(branch): + nonlocal branchCounter + try: + self._graph.add(branch) + branchCounter += 1 + if branchCounter % 2000 == 0: + _logger.info("Already added %d branches.", branchCounter) + except ValueError as ex: + _logger.debug("%s Branch: %s", ex, branch) + + if fileNames: + if isinstance(fileNames, (str, PurePath)): + fileNames = [fileNames] + for fn in fileNames: + sfn = str(fn) + if not path.exists(sfn): + continue + fileCounter += 1 + async for b in self._builder.buildCallGraphInFile(sfn): + pushBranch(b) + if globPattern or not fileNames: + async for b in self._builder.buildCallGraphInFiles(globPattern): + pushBranch(b) + if fileNames and not globPattern: + _logger.info("Added %d branches from %d files.", branchCounter, fileCounter) + else: + _logger.info("Added %d branches.", branchCounter) + + def removeByFiles(self, fileNames: Iterable[str]) -> Iterable[Path]: + """ + Clear the graph nodes whose source or definition node contains the specified files. + """ + fileNames = set((Path(f).resolve() for f in fileNames)) + affectedFiles = set((i.sourceScope.file for i in self._graph.items if i.definitionScope.file in fileNames)) + affectedFiles.update(fileNames) + self._graph.removeBySourceFiles(affectedFiles) + return affectedFiles + + async def rebuildGraph(self, fileNames: Iterable[str]): + """ + Rebuild the source graph for the specified files. This operation will clear and rebuild the graph nodes + whose source or definition node contains the specified files. 
+ """ + affectedFiles = self.removeByFiles(fileNames) + # self._rebuildCounter += 1 + # self._graph.dumpTo("rebuild_" + str(self._rebuildCounter) + ".txt") + await self.buildGraph((str(p) for p in affectedFiles)) diff --git a/persper/analytics/lsp_graph_server/ccls.py b/persper/analytics/lsp_graph_server/ccls.py new file mode 100644 index 00000000000..8e18382f9bb --- /dev/null +++ b/persper/analytics/lsp_graph_server/ccls.py @@ -0,0 +1,200 @@ +""" +ccls client-side LSP support. +""" +import logging +import os +from asyncio import sleep +from pathlib import Path, PurePath +from typing import List, Union + +from antlr4 import Token +from antlr4.FileStream import FileStream +from jsonrpc.endpoint import Endpoint +from jsonrpc.exceptions import JsonRpcException + +from persper.analytics.call_commit_graph import CallCommitGraph + +from . import LspClientGraphServer +from .callgraph.builder import CallGraphBuilder +from .callgraph.manager import CallGraphManager +from .fileparsers.CPP14Lexer import CPP14Lexer +from .languageclient.lspclient import LspClient +from .languageclient.lspcontract import LspContractObject, TextDocument, TextDocumentContentChangeEvent +from .languageclient.lspserver import LspServerStub + +_logger = logging.getLogger(__name__) + + +class CclsInfo(LspContractObject): + def __init__(self, pendingIndexRequests: int, postIndexWorkItems: int, projectEntries: int): + self.pendingIndexRequests = pendingIndexRequests + self.postIndexWorkItems = postIndexWorkItems + self.projectEntries = projectEntries + + def toDict(self): + raise NotImplementedError() + + @staticmethod + def fromDict(d: dict): + return CclsInfo(int(d["pipeline"]["pendingIndexRequests"]), + 0, + int(d["project"]["entries"])) + + +class CclsLspServerStub(LspServerStub): + def __init__(self, endpoint: Endpoint): + super().__init__(endpoint) + + async def cclsInfo(self): + """ + Gets the ccls language server status. 
+ """ + result = await self.request("$ccls/info") + return CclsInfo.fromDict(result) + + +class CclsLspClient(LspClient): + def __init__(self, rx, tx, logFile: str = None): + super().__init__(rx, tx, logFile) + self._serverStub = CclsLspServerStub(self._endpoint) + + def m_ccls__publish_skipped_ranges(self, uri: str, skippedRanges: list): + pass + + def m_ccls__publish_semantic_highlight(self, uri: str, symbols: list): + pass + + +class CclsCallGraphBuilder(CallGraphBuilder): + # Do not F12 on operators. cquery tend to randomly jump to false-positives for non-overloaded operators. + _tokensOfInterest = {CPP14Lexer.Identifier, + # CPP14Lexer.Plus, + # CPP14Lexer.Minus, + # CPP14Lexer.Star, + # CPP14Lexer.Div, + # CPP14Lexer.Mod, + # CPP14Lexer.Caret, + # CPP14Lexer.And, + # CPP14Lexer.Or, + # CPP14Lexer.Tilde, + # CPP14Lexer.Not, + # CPP14Lexer.Assign, + # CPP14Lexer.Less, + # CPP14Lexer.Greater, + # CPP14Lexer.PlusAssign, + # CPP14Lexer.MinusAssign, + # CPP14Lexer.StarAssign, + # CPP14Lexer.DivAssign, + # CPP14Lexer.ModAssign, + # CPP14Lexer.XorAssign, + # CPP14Lexer.AndAssign, + # CPP14Lexer.OrAssign, + # CPP14Lexer.LeftShift, + # CPP14Lexer.LeftShiftAssign, + # CPP14Lexer.Equal, + # CPP14Lexer.NotEqual, + # CPP14Lexer.LessEqual, + # CPP14Lexer.GreaterEqual, + # CPP14Lexer.AndAnd, + # CPP14Lexer.OrOr, + # CPP14Lexer.PlusPlus, + # CPP14Lexer.MinusMinus + } + + def __init__(self, lspClient: CclsLspClient): + if not isinstance(lspClient, CclsLspClient): + raise TypeError("lspClient should be an instance of CclsLspClient.") + super().__init__(lspClient) + self._lspClient: CclsLspClient + + def createLexer(self, fileStream: FileStream): + return CPP14Lexer(fileStream) + + def filterToken(self, token: Token): + return token.type in self._tokensOfInterest + + def inferLanguageId(self, path: PurePath): + return "cpp" + + def modifyFile(self, fileName: str, newContent: str): + return super().modifyFile(fileName, newContent) + + async def _waitForJobs(self): + lastJobs = 
None + while True: + info: CclsInfo = await self._lspClient.server.cclsInfo() + curJobs = info.pendingIndexRequests + info.postIndexWorkItems + if curJobs != lastJobs: + _logger.debug("Server jobs: %d.", curJobs) + lastJobs = curJobs + if curJobs == 0: + break + if curJobs < 5: + await sleep(0.05) + elif curJobs < 50: + await sleep(0.1) + else: + await sleep(1) + + async def openDocument(self, textDoc: TextDocument): + self._lspClient.server.textDocumentDidOpen(textDoc) + while True: + try: + await self._waitForJobs() + return True + except JsonRpcException as ex: + if ex.code == -32002: + _logger.warning("Language server is not ready. Waiting…") + await sleep(5) + elif ex.code == -32603 and "unable to find" in ex.message: + _logger.warning("The file seems invalid. Server error: %s", ex.message) + return False + raise + + +class CclsGraphServer(LspClientGraphServer): + + defaultLanguageServerCommand = "./bin/ccls" + defaultLoggedLanguageServerCommand = "./bin/ccls -log-file=ccls.log" + + def __init__(self, workspaceRoot: str, cacheRoot: str = None, + languageServerCommand: Union[str, List[str]] = None, + dumpLogs: bool = False, + graph: CallCommitGraph = None): + super().__init__(workspaceRoot, languageServerCommand=languageServerCommand, + dumpLogs=dumpLogs, graph=graph) + self._cacheRoot = Path(cacheRoot).resolve() if cacheRoot else self._workspaceRoot.joinpath(".ccls-cache") + self._c_requireScopeDefinitionMatch = True + + async def startLspClient(self): + await super().startLspClient() + self._lspClient = CclsLspClient(self._lspServerProc.stdout, self._lspServerProc.stdin, + logFile="rpclog.log" if self._dumpLogs else None) + self._lspClient.start() + _logger.debug(await self._lspClient.server.initialize( + rootFolder=self._workspaceRoot, + initializationOptions={"cacheDirectory": str(self._cacheRoot), + "diagnostics": {"onParse": False, "onType": False}, + "discoverSystemIncludes": True, + "enableCacheRead": True, + "enableCacheWrite": True, + "clang": { + 
"excludeArgs": [], + "extraArgs": ["-nocudalib"], + "pathMappings": [], + "resourceDir": "" + }, + "index": {"threads": 0} + })) + self._lspClient.server.initialized() + self._callGraphBuilder = CclsCallGraphBuilder(self._lspClient) + self._callGraphBuilder.workspaceFilePatterns = [ + str(self._workspaceRoot.joinpath("**/*.[Hh]")), + str(self._workspaceRoot.joinpath("**/*.[Hh][Hh]")), + str(self._workspaceRoot.joinpath("**/*.[Hh][Pp][Pp]")), + str(self._workspaceRoot.joinpath("**/*.[Cc]")), + str(self._workspaceRoot.joinpath("**/*.[Cc][Cc]")), + str(self._workspaceRoot.joinpath("**/*.[Cc][Pp][Pp]")), + str(self._workspaceRoot.joinpath("**/*.[Cc][Xx][Xx]")) + ] + self._callGraphManager = CallGraphManager(self._callGraphBuilder, self._callGraph) diff --git a/persper/analytics/lsp_graph_server/cquery.py b/persper/analytics/lsp_graph_server/cquery.py new file mode 100644 index 00000000000..1e1c176c9ff --- /dev/null +++ b/persper/analytics/lsp_graph_server/cquery.py @@ -0,0 +1,111 @@ +import logging +from asyncio import sleep +from pathlib import Path, PurePath + +from antlr4 import Token +from jsonrpc.endpoint import Endpoint +from jsonrpc.exceptions import JsonRpcException + +from persper.analytics.lsp_graph_server.callgraph.builder import CallGraphBuilder +from persper.analytics.lsp_graph_server.fileparsers.CPP14Lexer import CPP14Lexer +from persper.analytics.lsp_graph_server.languageclient.lspclient import LspClient +from persper.analytics.lsp_graph_server.languageclient.lspcontract import TextDocument +from persper.analytics.lsp_graph_server.languageclient.lspserver import LspServerStub + +_logger = logging.getLogger(__name__) + + +class CQueryLspServerStub(LspServerStub): + def __init__(self, endpoint: Endpoint): + super().__init__(endpoint) + + def freshenIndex(self): + self.notify("$cquery/freshenIndex") + + def textDocumentDidView(self, documentUri: str): + self.notify("$cquery/textDocumentDidView", {"textDocumentUri": documentUri}) + + +class 
# --- fragment (collapsed diff): the `class` keyword of CQueryLspClient lies in
# --- the previous chunk; the full client is reconstructed here.
class CQueryLspClient(LspClient):
    def __init__(self, rx, tx):
        super().__init__(rx, tx)
        self._serverStub = CQueryLspServerStub(self._endpoint)
        self._isBusy = False

    @property
    def isBusy(self):
        # Updated from $cquery/progress notifications; True while indexing.
        return self._isBusy

    def m_cquery__progress(self, indexRequestCount=0, doIdMapCount=0, loadPreviousIndexCount=0, onIdMappedCount=0, onIndexedCount=0, activeThreads=0):
        # See https://github.com/cquery-project/vscode-cquery/blob/8ded1bd94548f9341bd9f1f1a636af01602012e0/src/extension.ts#L559
        total = indexRequestCount + doIdMapCount + loadPreviousIndexCount + onIdMappedCount + onIndexedCount + activeThreads
        self._isBusy = total > 0
        _logger.log(logging.INFO if total > 0 else logging.DEBUG, "Req:%d IdMap:%d/%d/%d Threads:%d",
                    indexRequestCount, doIdMapCount, onIdMappedCount, onIndexedCount, activeThreads)


class CQueryCallGraphBuilder(CallGraphBuilder):
    # Do not F12 on operators. cquery tend to randomly jump to false-positives for non-overloaded operators.
    # (The long list of individual operator token types that used to be kept
    # here, commented out, has been removed.)
    _tokensOfInterest = {CPP14Lexer.Identifier}

    def __init__(self, lspClient: "LspClient"):
        if not isinstance(lspClient, CQueryLspClient):
            raise TypeError("lspClient should be an instance of CQueryLspClient.")
        # FIX: CallGraphBuilder.__init__ accepts only (lspClient); the extra
        # CPP14Lexer positional argument raised TypeError at construction.
        super().__init__(lspClient)

# --- fragment (collapsed diff): `def ` of filterToken continues in the next chunk.
# --- fragment (collapsed diff): these are methods of CQueryCallGraphBuilder;
# --- the class header and the `def ` of filterToken lie in the previous chunk.

def filterToken(self, token: "Token"):
    # Only identifier tokens are worth a goto-definition round trip.
    return token.type in self._tokensOfInterest

def inferLanguageId(self, path: "PurePath"):
    return "cpp"

def modifyFile(self, fileName: str, newContent: str):
    # After an edit, ask cquery to refresh its index so results stay current.
    old = super().modifyFile(fileName, newContent)
    self._lspClient.server.freshenIndex()
    return old

async def openDocument(self, textDoc: "TextDocument"):
    """
    Open the document and block until cquery has indexed it.
    Returns True on success; callers truth-test the result.
    """
    self._lspClient.server.textDocumentDidOpen(textDoc)
    while True:
        try:
            while self._lspClient.isBusy:
                await sleep(1)
            await self._lspClient.server.textDocumentCodeLens(textDoc.uri)
            # FIX: was a bare `return` (None); CallGraphBuilder truth-tests
            # the result (getTokenizedDocument / buildCallGraphInFile), so
            # every open looked like a failure and no symbols were collected.
            return True
        except JsonRpcException as ex:
            # cquery specific
            if ex.code == -32603 and "Unable to find file" in ex.message:
                _logger.warning("Language server is not ready. Waiting…")
                await sleep(5)
            else:
                # FIX: other JSON-RPC errors used to be swallowed and retried
                # forever; re-raise, matching the ccls implementation.
                raise

# --- fragment (collapsed diff): head of generated
# --- persper/analytics/lsp_graph_server/fileparsers/CPP14Lexer.py
# --- (ANTLR-generated serialized ATN; machine-generated, continues in the
# --- following chunks and is left unchanged).
+ buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t") + buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t") + buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4") + buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4") + buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4") + buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4") + buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080") + buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083") + buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087") + buf.write("\t\u0087\4\u0088\t\u0088\4\u0089\t\u0089\4\u008a\t\u008a") + buf.write("\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d\4\u008e") + buf.write("\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091") + buf.write("\4\u0092\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095") + buf.write("\t\u0095\4\u0096\t\u0096\4\u0097\t\u0097\4\u0098\t\u0098") + buf.write("\4\u0099\t\u0099\4\u009a\t\u009a\4\u009b\t\u009b\4\u009c") + buf.write("\t\u009c\4\u009d\t\u009d\4\u009e\t\u009e\4\u009f\t\u009f") + buf.write("\4\u00a0\t\u00a0\4\u00a1\t\u00a1\4\u00a2\t\u00a2\4\u00a3") + buf.write("\t\u00a3\4\u00a4\t\u00a4\4\u00a5\t\u00a5\4\u00a6\t\u00a6") + buf.write("\4\u00a7\t\u00a7\4\u00a8\t\u00a8\4\u00a9\t\u00a9\3\2\3") + buf.write("\2\7\2\u0156\n\2\f\2\16\2\u0159\13\2\3\2\3\2\5\2\u015d") + buf.write("\n\2\3\2\6\2\u0160\n\2\r\2\16\2\u0161\3\2\6\2\u0165\n") + buf.write("\2\r\2\16\2\u0166\3\2\3\2\3\3\3\3\7\3\u016d\n\3\f\3\16") + buf.write("\3\u0170\13\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4") + buf.write("\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3") + buf.write("\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t") + buf.write("\3\t\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13") + buf.write("\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3") + buf.write("\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3") + 
buf.write("\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20") + buf.write("\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22") + buf.write("\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\23") + buf.write("\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24") + buf.write("\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25") + buf.write("\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27") + buf.write("\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31") + buf.write("\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31") + buf.write("\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\34") + buf.write("\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35") + buf.write("\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\36\3\36") + buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3!\3") + buf.write("!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3$") + buf.write("\3$\3$\3$\3$\3%\3%\3%\3&\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'") + buf.write("\3\'\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3") + buf.write("*\3*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3") + buf.write(",\3,\3-\3-\3-\3-\3-\3-\3-\3-\3.\3.\3.\3.\3.\3.\3.\3.\3") + buf.write(".\3/\3/\3/\3/\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\60") + buf.write("\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\61") + buf.write("\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63") + buf.write("\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64") + buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64") + buf.write("\3\64\3\64\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\66\3\66") + buf.write("\3\66\3\66\3\66\3\66\3\67\3\67\3\67\3\67\3\67\3\67\3\67") + buf.write("\38\38\38\38\38\38\38\39\39\39\39\39\39\39\3:\3:\3:\3") + buf.write(":\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3") + buf.write(";\3;\3;\3;\3;\3<\3<\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3=\3") + buf.write("=\3>\3>\3>\3>\3>\3>\3>\3>\3>\3?\3?\3?\3?\3?\3@\3@\3@\3") + 
buf.write("@\3@\3@\3@\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3B\3B\3") + buf.write("B\3B\3B\3C\3C\3C\3C\3D\3D\3D\3D\3D\3D\3D\3D\3E\3E\3E\3") + buf.write("E\3E\3E\3E\3F\3F\3F\3F\3F\3F\3F\3F\3F\3G\3G\3G\3G\3G\3") + buf.write("G\3H\3H\3H\3H\3H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3J\3J\3") + buf.write("J\3J\3J\3J\3J\3J\3K\3K\3K\3K\3K\3L\3L\3L\3L\3L\3L\3L\3") + buf.write("L\3L\3M\3M\3M\3M\3M\3M\3M\3M\3N\3N\3N\3N\3N\3N\3O\3O\3") + buf.write("P\3P\3Q\3Q\3R\3R\3S\3S\3T\3T\3U\3U\3V\3V\3W\3W\3X\3X\3") + buf.write("Y\3Y\3Z\3Z\3[\3[\3\\\3\\\3]\3]\3^\3^\3_\3_\3`\3`\3a\3") + buf.write("a\3b\3b\3b\3c\3c\3c\3d\3d\3d\3e\3e\3e\3f\3f\3f\3g\3g\3") + buf.write("g\3h\3h\3h\3i\3i\3i\3j\3j\3j\3k\3k\3k\3k\3l\3l\3l\3m\3") + buf.write("m\3m\3n\3n\3n\3o\3o\3o\3p\3p\3p\3q\3q\3q\3r\3r\3r\3s\3") + buf.write("s\3s\3t\3t\3u\3u\3u\3u\3v\3v\3v\3w\3w\3x\3x\3y\3y\3y\3") + buf.write("z\3z\3{\3{\3|\3|\3|\3}\3}\3}\3}\3~\3~\3~\3~\3~\3\177\3") + buf.write("\177\3\177\3\177\3\177\3\177\3\177\3\177\3\177\3\177\5") + buf.write("\177\u0421\n\177\3\u0080\3\u0080\3\u0080\7\u0080\u0426") + buf.write("\n\u0080\f\u0080\16\u0080\u0429\13\u0080\3\u0081\3\u0081") + buf.write("\5\u0081\u042d\n\u0081\3\u0082\3\u0082\3\u0083\3\u0083") + buf.write("\3\u0084\3\u0084\5\u0084\u0435\n\u0084\3\u0084\3\u0084") + buf.write("\5\u0084\u0439\n\u0084\3\u0084\3\u0084\5\u0084\u043d\n") + buf.write("\u0084\3\u0084\3\u0084\5\u0084\u0441\n\u0084\5\u0084\u0443") + buf.write("\n\u0084\3\u0085\3\u0085\5\u0085\u0447\n\u0085\3\u0085") + buf.write("\7\u0085\u044a\n\u0085\f\u0085\16\u0085\u044d\13\u0085") + buf.write("\3\u0086\3\u0086\5\u0086\u0451\n\u0086\3\u0086\7\u0086") + buf.write("\u0454\n\u0086\f\u0086\16\u0086\u0457\13\u0086\3\u0087") + buf.write("\3\u0087\3\u0087\3\u0087\5\u0087\u045d\n\u0087\3\u0087") + buf.write("\3\u0087\5\u0087\u0461\n\u0087\3\u0087\7\u0087\u0464\n") + buf.write("\u0087\f\u0087\16\u0087\u0467\13\u0087\3\u0088\3\u0088") + buf.write("\3\u0088\3\u0088\5\u0088\u046d\n\u0088\3\u0088\3\u0088") + 
buf.write("\5\u0088\u0471\n\u0088\3\u0088\7\u0088\u0474\n\u0088\f") + buf.write("\u0088\16\u0088\u0477\13\u0088\3\u0089\3\u0089\3\u008a") + buf.write("\3\u008a\3\u008b\3\u008b\3\u008c\3\u008c\3\u008d\3\u008d") + buf.write("\5\u008d\u0483\n\u008d\3\u008d\3\u008d\5\u008d\u0487\n") + buf.write("\u008d\3\u008d\3\u008d\5\u008d\u048b\n\u008d\3\u008d\3") + buf.write("\u008d\5\u008d\u048f\n\u008d\5\u008d\u0491\n\u008d\3\u008e") + buf.write("\3\u008e\3\u008f\3\u008f\3\u0090\3\u0090\3\u0090\3\u0090") + buf.write("\5\u0090\u049b\n\u0090\3\u0091\3\u0091\6\u0091\u049f\n") + buf.write("\u0091\r\u0091\16\u0091\u04a0\3\u0091\3\u0091\3\u0091") + buf.write("\3\u0091\3\u0091\6\u0091\u04a8\n\u0091\r\u0091\16\u0091") + buf.write("\u04a9\3\u0091\3\u0091\3\u0091\3\u0091\3\u0091\6\u0091") + buf.write("\u04b1\n\u0091\r\u0091\16\u0091\u04b2\3\u0091\3\u0091") + buf.write("\3\u0091\3\u0091\3\u0091\6\u0091\u04ba\n\u0091\r\u0091") + buf.write("\16\u0091\u04bb\3\u0091\3\u0091\5\u0091\u04c0\n\u0091") + buf.write("\3\u0092\3\u0092\3\u0092\5\u0092\u04c5\n\u0092\3\u0093") + buf.write("\3\u0093\3\u0093\5\u0093\u04ca\n\u0093\3\u0094\3\u0094") + buf.write("\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094") + buf.write("\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094") + buf.write("\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\5\u0094") + buf.write("\u04e2\n\u0094\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095") + buf.write("\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095\5\u0095") + buf.write("\u04ef\n\u0095\3\u0096\3\u0096\3\u0096\3\u0096\6\u0096") + buf.write("\u04f5\n\u0096\r\u0096\16\u0096\u04f6\3\u0097\3\u0097") + buf.write("\5\u0097\u04fb\n\u0097\3\u0097\5\u0097\u04fe\n\u0097\3") + buf.write("\u0097\3\u0097\3\u0097\5\u0097\u0503\n\u0097\5\u0097\u0505") + buf.write("\n\u0097\3\u0098\5\u0098\u0508\n\u0098\3\u0098\3\u0098") + buf.write("\3\u0098\3\u0098\3\u0098\5\u0098\u050f\n\u0098\3\u0099") + buf.write("\3\u0099\5\u0099\u0513\n\u0099\3\u0099\3\u0099\3\u0099") + 
buf.write("\5\u0099\u0518\n\u0099\3\u0099\5\u0099\u051b\n\u0099\3") + buf.write("\u009a\3\u009a\3\u009b\3\u009b\5\u009b\u0521\n\u009b\3") + buf.write("\u009b\7\u009b\u0524\n\u009b\f\u009b\16\u009b\u0527\13") + buf.write("\u009b\3\u009c\3\u009c\3\u009d\5\u009d\u052c\n\u009d\3") + buf.write("\u009d\3\u009d\7\u009d\u0530\n\u009d\f\u009d\16\u009d") + buf.write("\u0533\13\u009d\3\u009d\3\u009d\5\u009d\u0537\n\u009d") + buf.write("\3\u009d\3\u009d\5\u009d\u053b\n\u009d\3\u009e\3\u009e") + buf.write("\3\u009e\5\u009e\u0540\n\u009e\3\u009f\3\u009f\3\u009f") + buf.write("\5\u009f\u0545\n\u009f\3\u00a0\3\u00a0\7\u00a0\u0549\n") + buf.write("\u00a0\f\u00a0\16\u00a0\u054c\13\u00a0\3\u00a0\3\u00a0") + buf.write("\7\u00a0\u0550\n\u00a0\f\u00a0\16\u00a0\u0553\13\u00a0") + buf.write("\3\u00a0\3\u00a0\7\u00a0\u0557\n\u00a0\f\u00a0\16\u00a0") + buf.write("\u055a\13\u00a0\3\u00a0\3\u00a0\3\u00a1\3\u00a1\3\u00a1") + buf.write("\3\u00a1\3\u00a1\3\u00a1\3\u00a1\3\u00a1\3\u00a1\3\u00a1") + buf.write("\3\u00a1\3\u00a1\5\u00a1\u056a\n\u00a1\3\u00a2\3\u00a2") + buf.write("\5\u00a2\u056e\n\u00a2\3\u00a2\3\u00a2\3\u00a2\3\u00a2") + buf.write("\3\u00a2\3\u00a2\5\u00a2\u0576\n\u00a2\3\u00a3\3\u00a3") + buf.write("\3\u00a3\3\u00a4\3\u00a4\3\u00a4\3\u00a5\3\u00a5\3\u00a6") + buf.write("\6\u00a6\u0581\n\u00a6\r\u00a6\16\u00a6\u0582\3\u00a6") + buf.write("\3\u00a6\3\u00a7\3\u00a7\5\u00a7\u0589\n\u00a7\3\u00a7") + buf.write("\5\u00a7\u058c\n\u00a7\3\u00a7\3\u00a7\3\u00a8\3\u00a8") + buf.write("\3\u00a8\3\u00a8\7\u00a8\u0594\n\u00a8\f\u00a8\16\u00a8") + buf.write("\u0597\13\u00a8\3\u00a8\3\u00a8\3\u00a8\3\u00a8\3\u00a8") + buf.write("\3\u00a9\3\u00a9\3\u00a9\3\u00a9\7\u00a9\u05a2\n\u00a9") + buf.write("\f\u00a9\16\u00a9\u05a5\13\u00a9\3\u00a9\3\u00a9\7\u0157") + buf.write("\u054a\u0551\u0558\u0595\2\u00aa\3\3\5\4\7\5\t\6\13\7") + buf.write("\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20\37\21") + buf.write("!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67") + buf.write("\359\36;\37= 
?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61") + buf.write("a\62c\63e\64g\65i\66k\67m8o9q:s;u{?}@\177A\u0081") + buf.write("B\u0083C\u0085D\u0087E\u0089F\u008bG\u008dH\u008fI\u0091") + buf.write("J\u0093K\u0095L\u0097M\u0099N\u009bO\u009dP\u009fQ\u00a1") + buf.write("R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1") + buf.write("Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1") + buf.write("b\u00c3c\u00c5d\u00c7e\u00c9f\u00cbg\u00cdh\u00cfi\u00d1") + buf.write("j\u00d3k\u00d5l\u00d7m\u00d9n\u00dbo\u00ddp\u00dfq\u00e1") + buf.write("r\u00e3s\u00e5t\u00e7u\u00e9v\u00ebw\u00edx\u00efy\u00f1") + buf.write("z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb\2\u00fd\2\u00ff\177") + buf.write("\u0101\2\u0103\2\u0105\2\u0107\u0080\u0109\u0081\u010b") + buf.write("\u0082\u010d\u0083\u010f\u0084\u0111\2\u0113\2\u0115\2") + buf.write("\u0117\2\u0119\u0085\u011b\2\u011d\2\u011f\2\u0121\u0086") + buf.write("\u0123\2\u0125\2\u0127\2\u0129\2\u012b\2\u012d\u0087\u012f") + buf.write("\2\u0131\2\u0133\2\u0135\2\u0137\2\u0139\u0088\u013b\2") + buf.write("\u013d\2\u013f\2\u0141\u0089\u0143\u008a\u0145\u008b\u0147") + buf.write("\u008c\u0149\2\u014b\u008d\u014d\u008e\u014f\u008f\u0151") + buf.write("\u0090\3\2\22\3\2\f\f\5\2C\\aac|\3\2\62;\3\2\63;\3\2\62") + buf.write("9\5\2\62;CHch\3\2\62\63\4\2WWww\4\2NNnn\6\2\f\f\17\17") + buf.write("))^^\4\2--//\6\2HHNNhhnn\5\2NNWWww\6\2\f\f\17\17$$^^\4") + buf.write("\2\13\13\"\"\4\2\f\f\17\17\2\u05e6\2\3\3\2\2\2\2\5\3\2") + buf.write("\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2") + buf.write("\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2") + buf.write("\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37") + buf.write("\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2") + buf.write("\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2") + buf.write("\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2") + buf.write("\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2") + 
buf.write("\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2") + buf.write("\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3") + buf.write("\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a") + buf.write("\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2") + buf.write("k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2") + buf.write("\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2") + buf.write("\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2\u0085") + buf.write("\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b\3\2\2") + buf.write("\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093") + buf.write("\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2") + buf.write("\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1") + buf.write("\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2") + buf.write("\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af") + buf.write("\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2") + buf.write("\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd") + buf.write("\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2") + buf.write("\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb") + buf.write("\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2") + buf.write("\2\2\u00d3\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9") + buf.write("\3\2\2\2\2\u00db\3\2\2\2\2\u00dd\3\2\2\2\2\u00df\3\2\2") + buf.write("\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2\2\2\u00e7") + buf.write("\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2") + buf.write("\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5") + buf.write("\3\2\2\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2\2\2\u00ff\3\2\2") + buf.write("\2\2\u0107\3\2\2\2\2\u0109\3\2\2\2\2\u010b\3\2\2\2\2\u010d") + buf.write("\3\2\2\2\2\u010f\3\2\2\2\2\u0119\3\2\2\2\2\u0121\3\2\2") + buf.write("\2\2\u012d\3\2\2\2\2\u0139\3\2\2\2\2\u0141\3\2\2\2\2\u0143") + buf.write("\3\2\2\2\2\u0145\3\2\2\2\2\u0147\3\2\2\2\2\u014b\3\2\2") + 
buf.write("\2\2\u014d\3\2\2\2\2\u014f\3\2\2\2\2\u0151\3\2\2\2\3\u0153") + buf.write("\3\2\2\2\5\u016a\3\2\2\2\7\u0173\3\2\2\2\t\u017b\3\2\2") + buf.write("\2\13\u0183\3\2\2\2\r\u0187\3\2\2\2\17\u018c\3\2\2\2\21") + buf.write("\u0191\3\2\2\2\23\u0197\3\2\2\2\25\u019c\3\2\2\2\27\u01a2") + buf.write("\3\2\2\2\31\u01a7\3\2\2\2\33\u01b0\3\2\2\2\35\u01b9\3") + buf.write("\2\2\2\37\u01bf\3\2\2\2!\u01c5\3\2\2\2#\u01cf\3\2\2\2") + buf.write("%\u01da\3\2\2\2\'\u01e3\3\2\2\2)\u01ec\3\2\2\2+\u01f4") + buf.write("\3\2\2\2-\u01fb\3\2\2\2/\u01fe\3\2\2\2\61\u0205\3\2\2") + buf.write("\2\63\u0212\3\2\2\2\65\u0217\3\2\2\2\67\u021c\3\2\2\2") + buf.write("9\u0225\3\2\2\2;\u022c\3\2\2\2=\u0233\3\2\2\2?\u0239\3") + buf.write("\2\2\2A\u023f\3\2\2\2C\u0245\3\2\2\2E\u0249\3\2\2\2G\u0250") + buf.write("\3\2\2\2I\u0255\3\2\2\2K\u0258\3\2\2\2M\u025f\3\2\2\2") + buf.write("O\u0263\3\2\2\2Q\u0268\3\2\2\2S\u0270\3\2\2\2U\u027a\3") + buf.write("\2\2\2W\u027e\3\2\2\2Y\u0287\3\2\2\2[\u028f\3\2\2\2]\u0298") + buf.write("\3\2\2\2_\u02a1\3\2\2\2a\u02a9\3\2\2\2c\u02b3\3\2\2\2") + buf.write("e\u02ba\3\2\2\2g\u02c3\3\2\2\2i\u02d4\3\2\2\2k\u02db\3") + buf.write("\2\2\2m\u02e1\3\2\2\2o\u02e8\3\2\2\2q\u02ef\3\2\2\2s\u02f6") + buf.write("\3\2\2\2u\u0304\3\2\2\2w\u0310\3\2\2\2y\u0317\3\2\2\2") + buf.write("{\u031e\3\2\2\2}\u0327\3\2\2\2\177\u032c\3\2\2\2\u0081") + buf.write("\u0339\3\2\2\2\u0083\u033f\3\2\2\2\u0085\u0344\3\2\2\2") + buf.write("\u0087\u0348\3\2\2\2\u0089\u0350\3\2\2\2\u008b\u0357\3") + buf.write("\2\2\2\u008d\u0360\3\2\2\2\u008f\u0366\3\2\2\2\u0091\u036f") + buf.write("\3\2\2\2\u0093\u0375\3\2\2\2\u0095\u037d\3\2\2\2\u0097") + buf.write("\u0382\3\2\2\2\u0099\u038b\3\2\2\2\u009b\u0393\3\2\2\2") + buf.write("\u009d\u0399\3\2\2\2\u009f\u039b\3\2\2\2\u00a1\u039d\3") + buf.write("\2\2\2\u00a3\u039f\3\2\2\2\u00a5\u03a1\3\2\2\2\u00a7\u03a3") + buf.write("\3\2\2\2\u00a9\u03a5\3\2\2\2\u00ab\u03a7\3\2\2\2\u00ad") + buf.write("\u03a9\3\2\2\2\u00af\u03ab\3\2\2\2\u00b1\u03ad\3\2\2\2") + 
buf.write("\u00b3\u03af\3\2\2\2\u00b5\u03b1\3\2\2\2\u00b7\u03b3\3") + buf.write("\2\2\2\u00b9\u03b5\3\2\2\2\u00bb\u03b7\3\2\2\2\u00bd\u03b9") + buf.write("\3\2\2\2\u00bf\u03bb\3\2\2\2\u00c1\u03bd\3\2\2\2\u00c3") + buf.write("\u03bf\3\2\2\2\u00c5\u03c2\3\2\2\2\u00c7\u03c5\3\2\2\2") + buf.write("\u00c9\u03c8\3\2\2\2\u00cb\u03cb\3\2\2\2\u00cd\u03ce\3") + buf.write("\2\2\2\u00cf\u03d1\3\2\2\2\u00d1\u03d4\3\2\2\2\u00d3\u03d7") + buf.write("\3\2\2\2\u00d5\u03da\3\2\2\2\u00d7\u03de\3\2\2\2\u00d9") + buf.write("\u03e1\3\2\2\2\u00db\u03e4\3\2\2\2\u00dd\u03e7\3\2\2\2") + buf.write("\u00df\u03ea\3\2\2\2\u00e1\u03ed\3\2\2\2\u00e3\u03f0\3") + buf.write("\2\2\2\u00e5\u03f3\3\2\2\2\u00e7\u03f6\3\2\2\2\u00e9\u03f8") + buf.write("\3\2\2\2\u00eb\u03fc\3\2\2\2\u00ed\u03ff\3\2\2\2\u00ef") + buf.write("\u0401\3\2\2\2\u00f1\u0403\3\2\2\2\u00f3\u0406\3\2\2\2") + buf.write("\u00f5\u0408\3\2\2\2\u00f7\u040a\3\2\2\2\u00f9\u040d\3") + buf.write("\2\2\2\u00fb\u0411\3\2\2\2\u00fd\u0420\3\2\2\2\u00ff\u0422") + buf.write("\3\2\2\2\u0101\u042c\3\2\2\2\u0103\u042e\3\2\2\2\u0105") + buf.write("\u0430\3\2\2\2\u0107\u0442\3\2\2\2\u0109\u0444\3\2\2\2") + buf.write("\u010b\u044e\3\2\2\2\u010d\u045c\3\2\2\2\u010f\u046c\3") + buf.write("\2\2\2\u0111\u0478\3\2\2\2\u0113\u047a\3\2\2\2\u0115\u047c") + buf.write("\3\2\2\2\u0117\u047e\3\2\2\2\u0119\u0490\3\2\2\2\u011b") + buf.write("\u0492\3\2\2\2\u011d\u0494\3\2\2\2\u011f\u049a\3\2\2\2") + buf.write("\u0121\u04bf\3\2\2\2\u0123\u04c4\3\2\2\2\u0125\u04c9\3") + buf.write("\2\2\2\u0127\u04e1\3\2\2\2\u0129\u04ee\3\2\2\2\u012b\u04f0") + buf.write("\3\2\2\2\u012d\u0504\3\2\2\2\u012f\u050e\3\2\2\2\u0131") + buf.write("\u051a\3\2\2\2\u0133\u051c\3\2\2\2\u0135\u051e\3\2\2\2") + buf.write("\u0137\u0528\3\2\2\2\u0139\u053a\3\2\2\2\u013b\u053f\3") + buf.write("\2\2\2\u013d\u0544\3\2\2\2\u013f\u0546\3\2\2\2\u0141\u0569") + buf.write("\3\2\2\2\u0143\u0575\3\2\2\2\u0145\u0577\3\2\2\2\u0147") + buf.write("\u057a\3\2\2\2\u0149\u057d\3\2\2\2\u014b\u0580\3\2\2\2") + 
buf.write("\u014d\u058b\3\2\2\2\u014f\u058f\3\2\2\2\u0151\u059d\3") + buf.write("\2\2\2\u0153\u015f\7%\2\2\u0154\u0156\n\2\2\2\u0155\u0154") + buf.write("\3\2\2\2\u0156\u0159\3\2\2\2\u0157\u0158\3\2\2\2\u0157") + buf.write("\u0155\3\2\2\2\u0158\u015a\3\2\2\2\u0159\u0157\3\2\2\2") + buf.write("\u015a\u015c\7^\2\2\u015b\u015d\7\17\2\2\u015c\u015b\3") + buf.write("\2\2\2\u015c\u015d\3\2\2\2\u015d\u015e\3\2\2\2\u015e\u0160") + buf.write("\7\f\2\2\u015f\u0157\3\2\2\2\u0160\u0161\3\2\2\2\u0161") + buf.write("\u015f\3\2\2\2\u0161\u0162\3\2\2\2\u0162\u0164\3\2\2\2") + buf.write("\u0163\u0165\n\2\2\2\u0164\u0163\3\2\2\2\u0165\u0166\3") + buf.write("\2\2\2\u0166\u0164\3\2\2\2\u0166\u0167\3\2\2\2\u0167\u0168") + buf.write("\3\2\2\2\u0168\u0169\b\2\2\2\u0169\4\3\2\2\2\u016a\u016e") + buf.write("\7%\2\2\u016b\u016d\n\2\2\2\u016c\u016b\3\2\2\2\u016d") + buf.write("\u0170\3\2\2\2\u016e\u016c\3\2\2\2\u016e\u016f\3\2\2\2") + buf.write("\u016f\u0171\3\2\2\2\u0170\u016e\3\2\2\2\u0171\u0172\b") + buf.write("\3\2\2\u0172\6\3\2\2\2\u0173\u0174\7c\2\2\u0174\u0175") + buf.write("\7n\2\2\u0175\u0176\7k\2\2\u0176\u0177\7i\2\2\u0177\u0178") + buf.write("\7p\2\2\u0178\u0179\7c\2\2\u0179\u017a\7u\2\2\u017a\b") + buf.write("\3\2\2\2\u017b\u017c\7c\2\2\u017c\u017d\7n\2\2\u017d\u017e") + buf.write("\7k\2\2\u017e\u017f\7i\2\2\u017f\u0180\7p\2\2\u0180\u0181") + buf.write("\7q\2\2\u0181\u0182\7h\2\2\u0182\n\3\2\2\2\u0183\u0184") + buf.write("\7c\2\2\u0184\u0185\7u\2\2\u0185\u0186\7o\2\2\u0186\f") + buf.write("\3\2\2\2\u0187\u0188\7c\2\2\u0188\u0189\7w\2\2\u0189\u018a") + buf.write("\7v\2\2\u018a\u018b\7q\2\2\u018b\16\3\2\2\2\u018c\u018d") + buf.write("\7d\2\2\u018d\u018e\7q\2\2\u018e\u018f\7q\2\2\u018f\u0190") + buf.write("\7n\2\2\u0190\20\3\2\2\2\u0191\u0192\7d\2\2\u0192\u0193") + buf.write("\7t\2\2\u0193\u0194\7g\2\2\u0194\u0195\7c\2\2\u0195\u0196") + buf.write("\7m\2\2\u0196\22\3\2\2\2\u0197\u0198\7e\2\2\u0198\u0199") + buf.write("\7c\2\2\u0199\u019a\7u\2\2\u019a\u019b\7g\2\2\u019b\24") + 
buf.write("\3\2\2\2\u019c\u019d\7e\2\2\u019d\u019e\7c\2\2\u019e\u019f") + buf.write("\7v\2\2\u019f\u01a0\7e\2\2\u01a0\u01a1\7j\2\2\u01a1\26") + buf.write("\3\2\2\2\u01a2\u01a3\7e\2\2\u01a3\u01a4\7j\2\2\u01a4\u01a5") + buf.write("\7c\2\2\u01a5\u01a6\7t\2\2\u01a6\30\3\2\2\2\u01a7\u01a8") + buf.write("\7e\2\2\u01a8\u01a9\7j\2\2\u01a9\u01aa\7c\2\2\u01aa\u01ab") + buf.write("\7t\2\2\u01ab\u01ac\7\63\2\2\u01ac\u01ad\78\2\2\u01ad") + buf.write("\u01ae\7a\2\2\u01ae\u01af\7v\2\2\u01af\32\3\2\2\2\u01b0") + buf.write("\u01b1\7e\2\2\u01b1\u01b2\7j\2\2\u01b2\u01b3\7c\2\2\u01b3") + buf.write("\u01b4\7t\2\2\u01b4\u01b5\7\65\2\2\u01b5\u01b6\7\64\2") + buf.write("\2\u01b6\u01b7\7a\2\2\u01b7\u01b8\7v\2\2\u01b8\34\3\2") + buf.write("\2\2\u01b9\u01ba\7e\2\2\u01ba\u01bb\7n\2\2\u01bb\u01bc") + buf.write("\7c\2\2\u01bc\u01bd\7u\2\2\u01bd\u01be\7u\2\2\u01be\36") + buf.write("\3\2\2\2\u01bf\u01c0\7e\2\2\u01c0\u01c1\7q\2\2\u01c1\u01c2") + buf.write("\7p\2\2\u01c2\u01c3\7u\2\2\u01c3\u01c4\7v\2\2\u01c4 \3") + buf.write("\2\2\2\u01c5\u01c6\7e\2\2\u01c6\u01c7\7q\2\2\u01c7\u01c8") + buf.write("\7p\2\2\u01c8\u01c9\7u\2\2\u01c9\u01ca\7v\2\2\u01ca\u01cb") + buf.write("\7g\2\2\u01cb\u01cc\7z\2\2\u01cc\u01cd\7r\2\2\u01cd\u01ce") + buf.write("\7t\2\2\u01ce\"\3\2\2\2\u01cf\u01d0\7e\2\2\u01d0\u01d1") + buf.write("\7q\2\2\u01d1\u01d2\7p\2\2\u01d2\u01d3\7u\2\2\u01d3\u01d4") + buf.write("\7v\2\2\u01d4\u01d5\7a\2\2\u01d5\u01d6\7e\2\2\u01d6\u01d7") + buf.write("\7c\2\2\u01d7\u01d8\7u\2\2\u01d8\u01d9\7v\2\2\u01d9$\3") + buf.write("\2\2\2\u01da\u01db\7e\2\2\u01db\u01dc\7q\2\2\u01dc\u01dd") + buf.write("\7p\2\2\u01dd\u01de\7v\2\2\u01de\u01df\7k\2\2\u01df\u01e0") + buf.write("\7p\2\2\u01e0\u01e1\7w\2\2\u01e1\u01e2\7g\2\2\u01e2&\3") + buf.write("\2\2\2\u01e3\u01e4\7f\2\2\u01e4\u01e5\7g\2\2\u01e5\u01e6") + buf.write("\7e\2\2\u01e6\u01e7\7n\2\2\u01e7\u01e8\7v\2\2\u01e8\u01e9") + buf.write("\7{\2\2\u01e9\u01ea\7r\2\2\u01ea\u01eb\7g\2\2\u01eb(\3") + 
buf.write("\2\2\2\u01ec\u01ed\7f\2\2\u01ed\u01ee\7g\2\2\u01ee\u01ef") + buf.write("\7h\2\2\u01ef\u01f0\7c\2\2\u01f0\u01f1\7w\2\2\u01f1\u01f2") + buf.write("\7n\2\2\u01f2\u01f3\7v\2\2\u01f3*\3\2\2\2\u01f4\u01f5") + buf.write("\7f\2\2\u01f5\u01f6\7g\2\2\u01f6\u01f7\7n\2\2\u01f7\u01f8") + buf.write("\7g\2\2\u01f8\u01f9\7v\2\2\u01f9\u01fa\7g\2\2\u01fa,\3") + buf.write("\2\2\2\u01fb\u01fc\7f\2\2\u01fc\u01fd\7q\2\2\u01fd.\3") + buf.write("\2\2\2\u01fe\u01ff\7f\2\2\u01ff\u0200\7q\2\2\u0200\u0201") + buf.write("\7w\2\2\u0201\u0202\7d\2\2\u0202\u0203\7n\2\2\u0203\u0204") + buf.write("\7g\2\2\u0204\60\3\2\2\2\u0205\u0206\7f\2\2\u0206\u0207") + buf.write("\7{\2\2\u0207\u0208\7p\2\2\u0208\u0209\7c\2\2\u0209\u020a") + buf.write("\7o\2\2\u020a\u020b\7k\2\2\u020b\u020c\7e\2\2\u020c\u020d") + buf.write("\7a\2\2\u020d\u020e\7e\2\2\u020e\u020f\7c\2\2\u020f\u0210") + buf.write("\7u\2\2\u0210\u0211\7v\2\2\u0211\62\3\2\2\2\u0212\u0213") + buf.write("\7g\2\2\u0213\u0214\7n\2\2\u0214\u0215\7u\2\2\u0215\u0216") + buf.write("\7g\2\2\u0216\64\3\2\2\2\u0217\u0218\7g\2\2\u0218\u0219") + buf.write("\7p\2\2\u0219\u021a\7w\2\2\u021a\u021b\7o\2\2\u021b\66") + buf.write("\3\2\2\2\u021c\u021d\7g\2\2\u021d\u021e\7z\2\2\u021e\u021f") + buf.write("\7r\2\2\u021f\u0220\7n\2\2\u0220\u0221\7k\2\2\u0221\u0222") + buf.write("\7e\2\2\u0222\u0223\7k\2\2\u0223\u0224\7v\2\2\u02248\3") + buf.write("\2\2\2\u0225\u0226\7g\2\2\u0226\u0227\7z\2\2\u0227\u0228") + buf.write("\7r\2\2\u0228\u0229\7q\2\2\u0229\u022a\7t\2\2\u022a\u022b") + buf.write("\7v\2\2\u022b:\3\2\2\2\u022c\u022d\7g\2\2\u022d\u022e") + buf.write("\7z\2\2\u022e\u022f\7v\2\2\u022f\u0230\7g\2\2\u0230\u0231") + buf.write("\7t\2\2\u0231\u0232\7p\2\2\u0232<\3\2\2\2\u0233\u0234") + buf.write("\7h\2\2\u0234\u0235\7c\2\2\u0235\u0236\7n\2\2\u0236\u0237") + buf.write("\7u\2\2\u0237\u0238\7g\2\2\u0238>\3\2\2\2\u0239\u023a") + buf.write("\7h\2\2\u023a\u023b\7k\2\2\u023b\u023c\7p\2\2\u023c\u023d") + 
buf.write("\7c\2\2\u023d\u023e\7n\2\2\u023e@\3\2\2\2\u023f\u0240") + buf.write("\7h\2\2\u0240\u0241\7n\2\2\u0241\u0242\7q\2\2\u0242\u0243") + buf.write("\7c\2\2\u0243\u0244\7v\2\2\u0244B\3\2\2\2\u0245\u0246") + buf.write("\7h\2\2\u0246\u0247\7q\2\2\u0247\u0248\7t\2\2\u0248D\3") + buf.write("\2\2\2\u0249\u024a\7h\2\2\u024a\u024b\7t\2\2\u024b\u024c") + buf.write("\7k\2\2\u024c\u024d\7g\2\2\u024d\u024e\7p\2\2\u024e\u024f") + buf.write("\7f\2\2\u024fF\3\2\2\2\u0250\u0251\7i\2\2\u0251\u0252") + buf.write("\7q\2\2\u0252\u0253\7v\2\2\u0253\u0254\7q\2\2\u0254H\3") + buf.write("\2\2\2\u0255\u0256\7k\2\2\u0256\u0257\7h\2\2\u0257J\3") + buf.write("\2\2\2\u0258\u0259\7k\2\2\u0259\u025a\7p\2\2\u025a\u025b") + buf.write("\7n\2\2\u025b\u025c\7k\2\2\u025c\u025d\7p\2\2\u025d\u025e") + buf.write("\7g\2\2\u025eL\3\2\2\2\u025f\u0260\7k\2\2\u0260\u0261") + buf.write("\7p\2\2\u0261\u0262\7v\2\2\u0262N\3\2\2\2\u0263\u0264") + buf.write("\7n\2\2\u0264\u0265\7q\2\2\u0265\u0266\7p\2\2\u0266\u0267") + buf.write("\7i\2\2\u0267P\3\2\2\2\u0268\u0269\7o\2\2\u0269\u026a") + buf.write("\7w\2\2\u026a\u026b\7v\2\2\u026b\u026c\7c\2\2\u026c\u026d") + buf.write("\7d\2\2\u026d\u026e\7n\2\2\u026e\u026f\7g\2\2\u026fR\3") + buf.write("\2\2\2\u0270\u0271\7p\2\2\u0271\u0272\7c\2\2\u0272\u0273") + buf.write("\7o\2\2\u0273\u0274\7g\2\2\u0274\u0275\7u\2\2\u0275\u0276") + buf.write("\7r\2\2\u0276\u0277\7c\2\2\u0277\u0278\7e\2\2\u0278\u0279") + buf.write("\7g\2\2\u0279T\3\2\2\2\u027a\u027b\7p\2\2\u027b\u027c") + buf.write("\7g\2\2\u027c\u027d\7y\2\2\u027dV\3\2\2\2\u027e\u027f") + buf.write("\7p\2\2\u027f\u0280\7q\2\2\u0280\u0281\7g\2\2\u0281\u0282") + buf.write("\7z\2\2\u0282\u0283\7e\2\2\u0283\u0284\7g\2\2\u0284\u0285") + buf.write("\7r\2\2\u0285\u0286\7v\2\2\u0286X\3\2\2\2\u0287\u0288") + buf.write("\7p\2\2\u0288\u0289\7w\2\2\u0289\u028a\7n\2\2\u028a\u028b") + buf.write("\7n\2\2\u028b\u028c\7r\2\2\u028c\u028d\7v\2\2\u028d\u028e") + buf.write("\7t\2\2\u028eZ\3\2\2\2\u028f\u0290\7q\2\2\u0290\u0291") + 
buf.write("\7r\2\2\u0291\u0292\7g\2\2\u0292\u0293\7t\2\2\u0293\u0294") + buf.write("\7c\2\2\u0294\u0295\7v\2\2\u0295\u0296\7q\2\2\u0296\u0297") + buf.write("\7t\2\2\u0297\\\3\2\2\2\u0298\u0299\7q\2\2\u0299\u029a") + buf.write("\7x\2\2\u029a\u029b\7g\2\2\u029b\u029c\7t\2\2\u029c\u029d") + buf.write("\7t\2\2\u029d\u029e\7k\2\2\u029e\u029f\7f\2\2\u029f\u02a0") + buf.write("\7g\2\2\u02a0^\3\2\2\2\u02a1\u02a2\7r\2\2\u02a2\u02a3") + buf.write("\7t\2\2\u02a3\u02a4\7k\2\2\u02a4\u02a5\7x\2\2\u02a5\u02a6") + buf.write("\7c\2\2\u02a6\u02a7\7v\2\2\u02a7\u02a8\7g\2\2\u02a8`\3") + buf.write("\2\2\2\u02a9\u02aa\7r\2\2\u02aa\u02ab\7t\2\2\u02ab\u02ac") + buf.write("\7q\2\2\u02ac\u02ad\7v\2\2\u02ad\u02ae\7g\2\2\u02ae\u02af") + buf.write("\7e\2\2\u02af\u02b0\7v\2\2\u02b0\u02b1\7g\2\2\u02b1\u02b2") + buf.write("\7f\2\2\u02b2b\3\2\2\2\u02b3\u02b4\7r\2\2\u02b4\u02b5") + buf.write("\7w\2\2\u02b5\u02b6\7d\2\2\u02b6\u02b7\7n\2\2\u02b7\u02b8") + buf.write("\7k\2\2\u02b8\u02b9\7e\2\2\u02b9d\3\2\2\2\u02ba\u02bb") + buf.write("\7t\2\2\u02bb\u02bc\7g\2\2\u02bc\u02bd\7i\2\2\u02bd\u02be") + buf.write("\7k\2\2\u02be\u02bf\7u\2\2\u02bf\u02c0\7v\2\2\u02c0\u02c1") + buf.write("\7g\2\2\u02c1\u02c2\7t\2\2\u02c2f\3\2\2\2\u02c3\u02c4") + buf.write("\7t\2\2\u02c4\u02c5\7g\2\2\u02c5\u02c6\7k\2\2\u02c6\u02c7") + buf.write("\7p\2\2\u02c7\u02c8\7v\2\2\u02c8\u02c9\7g\2\2\u02c9\u02ca") + buf.write("\7t\2\2\u02ca\u02cb\7r\2\2\u02cb\u02cc\7t\2\2\u02cc\u02cd") + buf.write("\7g\2\2\u02cd\u02ce\7v\2\2\u02ce\u02cf\7a\2\2\u02cf\u02d0") + buf.write("\7e\2\2\u02d0\u02d1\7c\2\2\u02d1\u02d2\7u\2\2\u02d2\u02d3") + buf.write("\7v\2\2\u02d3h\3\2\2\2\u02d4\u02d5\7t\2\2\u02d5\u02d6") + buf.write("\7g\2\2\u02d6\u02d7\7v\2\2\u02d7\u02d8\7w\2\2\u02d8\u02d9") + buf.write("\7t\2\2\u02d9\u02da\7p\2\2\u02daj\3\2\2\2\u02db\u02dc") + buf.write("\7u\2\2\u02dc\u02dd\7j\2\2\u02dd\u02de\7q\2\2\u02de\u02df") + buf.write("\7t\2\2\u02df\u02e0\7v\2\2\u02e0l\3\2\2\2\u02e1\u02e2") + 
buf.write("\7u\2\2\u02e2\u02e3\7k\2\2\u02e3\u02e4\7i\2\2\u02e4\u02e5") + buf.write("\7p\2\2\u02e5\u02e6\7g\2\2\u02e6\u02e7\7f\2\2\u02e7n\3") + buf.write("\2\2\2\u02e8\u02e9\7u\2\2\u02e9\u02ea\7k\2\2\u02ea\u02eb") + buf.write("\7|\2\2\u02eb\u02ec\7g\2\2\u02ec\u02ed\7q\2\2\u02ed\u02ee") + buf.write("\7h\2\2\u02eep\3\2\2\2\u02ef\u02f0\7u\2\2\u02f0\u02f1") + buf.write("\7v\2\2\u02f1\u02f2\7c\2\2\u02f2\u02f3\7v\2\2\u02f3\u02f4") + buf.write("\7k\2\2\u02f4\u02f5\7e\2\2\u02f5r\3\2\2\2\u02f6\u02f7") + buf.write("\7u\2\2\u02f7\u02f8\7v\2\2\u02f8\u02f9\7c\2\2\u02f9\u02fa") + buf.write("\7v\2\2\u02fa\u02fb\7k\2\2\u02fb\u02fc\7e\2\2\u02fc\u02fd") + buf.write("\7a\2\2\u02fd\u02fe\7c\2\2\u02fe\u02ff\7u\2\2\u02ff\u0300") + buf.write("\7u\2\2\u0300\u0301\7g\2\2\u0301\u0302\7t\2\2\u0302\u0303") + buf.write("\7v\2\2\u0303t\3\2\2\2\u0304\u0305\7u\2\2\u0305\u0306") + buf.write("\7v\2\2\u0306\u0307\7c\2\2\u0307\u0308\7v\2\2\u0308\u0309") + buf.write("\7k\2\2\u0309\u030a\7e\2\2\u030a\u030b\7a\2\2\u030b\u030c") + buf.write("\7e\2\2\u030c\u030d\7c\2\2\u030d\u030e\7u\2\2\u030e\u030f") + buf.write("\7v\2\2\u030fv\3\2\2\2\u0310\u0311\7u\2\2\u0311\u0312") + buf.write("\7v\2\2\u0312\u0313\7t\2\2\u0313\u0314\7w\2\2\u0314\u0315") + buf.write("\7e\2\2\u0315\u0316\7v\2\2\u0316x\3\2\2\2\u0317\u0318") + buf.write("\7u\2\2\u0318\u0319\7y\2\2\u0319\u031a\7k\2\2\u031a\u031b") + buf.write("\7v\2\2\u031b\u031c\7e\2\2\u031c\u031d\7j\2\2\u031dz\3") + buf.write("\2\2\2\u031e\u031f\7v\2\2\u031f\u0320\7g\2\2\u0320\u0321") + buf.write("\7o\2\2\u0321\u0322\7r\2\2\u0322\u0323\7n\2\2\u0323\u0324") + buf.write("\7c\2\2\u0324\u0325\7v\2\2\u0325\u0326\7g\2\2\u0326|\3") + buf.write("\2\2\2\u0327\u0328\7v\2\2\u0328\u0329\7j\2\2\u0329\u032a") + buf.write("\7k\2\2\u032a\u032b\7u\2\2\u032b~\3\2\2\2\u032c\u032d") + buf.write("\7v\2\2\u032d\u032e\7j\2\2\u032e\u032f\7t\2\2\u032f\u0330") + buf.write("\7g\2\2\u0330\u0331\7c\2\2\u0331\u0332\7f\2\2\u0332\u0333") + 
buf.write("\7a\2\2\u0333\u0334\7n\2\2\u0334\u0335\7q\2\2\u0335\u0336") + buf.write("\7e\2\2\u0336\u0337\7c\2\2\u0337\u0338\7n\2\2\u0338\u0080") + buf.write("\3\2\2\2\u0339\u033a\7v\2\2\u033a\u033b\7j\2\2\u033b\u033c") + buf.write("\7t\2\2\u033c\u033d\7q\2\2\u033d\u033e\7y\2\2\u033e\u0082") + buf.write("\3\2\2\2\u033f\u0340\7v\2\2\u0340\u0341\7t\2\2\u0341\u0342") + buf.write("\7w\2\2\u0342\u0343\7g\2\2\u0343\u0084\3\2\2\2\u0344\u0345") + buf.write("\7v\2\2\u0345\u0346\7t\2\2\u0346\u0347\7{\2\2\u0347\u0086") + buf.write("\3\2\2\2\u0348\u0349\7v\2\2\u0349\u034a\7{\2\2\u034a\u034b") + buf.write("\7r\2\2\u034b\u034c\7g\2\2\u034c\u034d\7f\2\2\u034d\u034e") + buf.write("\7g\2\2\u034e\u034f\7h\2\2\u034f\u0088\3\2\2\2\u0350\u0351") + buf.write("\7v\2\2\u0351\u0352\7{\2\2\u0352\u0353\7r\2\2\u0353\u0354") + buf.write("\7g\2\2\u0354\u0355\7k\2\2\u0355\u0356\7f\2\2\u0356\u008a") + buf.write("\3\2\2\2\u0357\u0358\7v\2\2\u0358\u0359\7{\2\2\u0359\u035a") + buf.write("\7r\2\2\u035a\u035b\7g\2\2\u035b\u035c\7p\2\2\u035c\u035d") + buf.write("\7c\2\2\u035d\u035e\7o\2\2\u035e\u035f\7g\2\2\u035f\u008c") + buf.write("\3\2\2\2\u0360\u0361\7w\2\2\u0361\u0362\7p\2\2\u0362\u0363") + buf.write("\7k\2\2\u0363\u0364\7q\2\2\u0364\u0365\7p\2\2\u0365\u008e") + buf.write("\3\2\2\2\u0366\u0367\7w\2\2\u0367\u0368\7p\2\2\u0368\u0369") + buf.write("\7u\2\2\u0369\u036a\7k\2\2\u036a\u036b\7i\2\2\u036b\u036c") + buf.write("\7p\2\2\u036c\u036d\7g\2\2\u036d\u036e\7f\2\2\u036e\u0090") + buf.write("\3\2\2\2\u036f\u0370\7w\2\2\u0370\u0371\7u\2\2\u0371\u0372") + buf.write("\7k\2\2\u0372\u0373\7p\2\2\u0373\u0374\7i\2\2\u0374\u0092") + buf.write("\3\2\2\2\u0375\u0376\7x\2\2\u0376\u0377\7k\2\2\u0377\u0378") + buf.write("\7t\2\2\u0378\u0379\7v\2\2\u0379\u037a\7w\2\2\u037a\u037b") + buf.write("\7c\2\2\u037b\u037c\7n\2\2\u037c\u0094\3\2\2\2\u037d\u037e") + buf.write("\7x\2\2\u037e\u037f\7q\2\2\u037f\u0380\7k\2\2\u0380\u0381") + buf.write("\7f\2\2\u0381\u0096\3\2\2\2\u0382\u0383\7x\2\2\u0383\u0384") + 
buf.write("\7q\2\2\u0384\u0385\7n\2\2\u0385\u0386\7c\2\2\u0386\u0387") + buf.write("\7v\2\2\u0387\u0388\7k\2\2\u0388\u0389\7n\2\2\u0389\u038a") + buf.write("\7g\2\2\u038a\u0098\3\2\2\2\u038b\u038c\7y\2\2\u038c\u038d") + buf.write("\7e\2\2\u038d\u038e\7j\2\2\u038e\u038f\7c\2\2\u038f\u0390") + buf.write("\7t\2\2\u0390\u0391\7a\2\2\u0391\u0392\7v\2\2\u0392\u009a") + buf.write("\3\2\2\2\u0393\u0394\7y\2\2\u0394\u0395\7j\2\2\u0395\u0396") + buf.write("\7k\2\2\u0396\u0397\7n\2\2\u0397\u0398\7g\2\2\u0398\u009c") + buf.write("\3\2\2\2\u0399\u039a\7*\2\2\u039a\u009e\3\2\2\2\u039b") + buf.write("\u039c\7+\2\2\u039c\u00a0\3\2\2\2\u039d\u039e\7]\2\2\u039e") + buf.write("\u00a2\3\2\2\2\u039f\u03a0\7_\2\2\u03a0\u00a4\3\2\2\2") + buf.write("\u03a1\u03a2\7}\2\2\u03a2\u00a6\3\2\2\2\u03a3\u03a4\7") + buf.write("\177\2\2\u03a4\u00a8\3\2\2\2\u03a5\u03a6\7-\2\2\u03a6") + buf.write("\u00aa\3\2\2\2\u03a7\u03a8\7/\2\2\u03a8\u00ac\3\2\2\2") + buf.write("\u03a9\u03aa\7,\2\2\u03aa\u00ae\3\2\2\2\u03ab\u03ac\7") + buf.write("\61\2\2\u03ac\u00b0\3\2\2\2\u03ad\u03ae\7\'\2\2\u03ae") + buf.write("\u00b2\3\2\2\2\u03af\u03b0\7`\2\2\u03b0\u00b4\3\2\2\2") + buf.write("\u03b1\u03b2\7(\2\2\u03b2\u00b6\3\2\2\2\u03b3\u03b4\7") + buf.write("~\2\2\u03b4\u00b8\3\2\2\2\u03b5\u03b6\7\u0080\2\2\u03b6") + buf.write("\u00ba\3\2\2\2\u03b7\u03b8\7#\2\2\u03b8\u00bc\3\2\2\2") + buf.write("\u03b9\u03ba\7?\2\2\u03ba\u00be\3\2\2\2\u03bb\u03bc\7") + buf.write(">\2\2\u03bc\u00c0\3\2\2\2\u03bd\u03be\7@\2\2\u03be\u00c2") + buf.write("\3\2\2\2\u03bf\u03c0\7-\2\2\u03c0\u03c1\7?\2\2\u03c1\u00c4") + buf.write("\3\2\2\2\u03c2\u03c3\7/\2\2\u03c3\u03c4\7?\2\2\u03c4\u00c6") + buf.write("\3\2\2\2\u03c5\u03c6\7,\2\2\u03c6\u03c7\7?\2\2\u03c7\u00c8") + buf.write("\3\2\2\2\u03c8\u03c9\7\61\2\2\u03c9\u03ca\7?\2\2\u03ca") + buf.write("\u00ca\3\2\2\2\u03cb\u03cc\7\'\2\2\u03cc\u03cd\7?\2\2") + buf.write("\u03cd\u00cc\3\2\2\2\u03ce\u03cf\7`\2\2\u03cf\u03d0\7") + buf.write("?\2\2\u03d0\u00ce\3\2\2\2\u03d1\u03d2\7(\2\2\u03d2\u03d3") + 
buf.write("\7?\2\2\u03d3\u00d0\3\2\2\2\u03d4\u03d5\7~\2\2\u03d5\u03d6") + buf.write("\7?\2\2\u03d6\u00d2\3\2\2\2\u03d7\u03d8\7>\2\2\u03d8\u03d9") + buf.write("\7>\2\2\u03d9\u00d4\3\2\2\2\u03da\u03db\7>\2\2\u03db\u03dc") + buf.write("\7>\2\2\u03dc\u03dd\7?\2\2\u03dd\u00d6\3\2\2\2\u03de\u03df") + buf.write("\7?\2\2\u03df\u03e0\7?\2\2\u03e0\u00d8\3\2\2\2\u03e1\u03e2") + buf.write("\7#\2\2\u03e2\u03e3\7?\2\2\u03e3\u00da\3\2\2\2\u03e4\u03e5") + buf.write("\7>\2\2\u03e5\u03e6\7?\2\2\u03e6\u00dc\3\2\2\2\u03e7\u03e8") + buf.write("\7@\2\2\u03e8\u03e9\7?\2\2\u03e9\u00de\3\2\2\2\u03ea\u03eb") + buf.write("\7(\2\2\u03eb\u03ec\7(\2\2\u03ec\u00e0\3\2\2\2\u03ed\u03ee") + buf.write("\7~\2\2\u03ee\u03ef\7~\2\2\u03ef\u00e2\3\2\2\2\u03f0\u03f1") + buf.write("\7-\2\2\u03f1\u03f2\7-\2\2\u03f2\u00e4\3\2\2\2\u03f3\u03f4") + buf.write("\7/\2\2\u03f4\u03f5\7/\2\2\u03f5\u00e6\3\2\2\2\u03f6\u03f7") + buf.write("\7.\2\2\u03f7\u00e8\3\2\2\2\u03f8\u03f9\7/\2\2\u03f9\u03fa") + buf.write("\7@\2\2\u03fa\u03fb\7,\2\2\u03fb\u00ea\3\2\2\2\u03fc\u03fd") + buf.write("\7/\2\2\u03fd\u03fe\7@\2\2\u03fe\u00ec\3\2\2\2\u03ff\u0400") + buf.write("\7A\2\2\u0400\u00ee\3\2\2\2\u0401\u0402\7<\2\2\u0402\u00f0") + buf.write("\3\2\2\2\u0403\u0404\7<\2\2\u0404\u0405\7<\2\2\u0405\u00f2") + buf.write("\3\2\2\2\u0406\u0407\7=\2\2\u0407\u00f4\3\2\2\2\u0408") + buf.write("\u0409\7\60\2\2\u0409\u00f6\3\2\2\2\u040a\u040b\7\60\2") + buf.write("\2\u040b\u040c\7,\2\2\u040c\u00f8\3\2\2\2\u040d\u040e") + buf.write("\7\60\2\2\u040e\u040f\7\60\2\2\u040f\u0410\7\60\2\2\u0410") + buf.write("\u00fa\3\2\2\2\u0411\u0412\5\u0115\u008b\2\u0412\u0413") + buf.write("\5\u0115\u008b\2\u0413\u0414\5\u0115\u008b\2\u0414\u0415") + buf.write("\5\u0115\u008b\2\u0415\u00fc\3\2\2\2\u0416\u0417\7^\2") + buf.write("\2\u0417\u0418\7w\2\2\u0418\u0419\3\2\2\2\u0419\u0421") + buf.write("\5\u00fb~\2\u041a\u041b\7^\2\2\u041b\u041c\7W\2\2\u041c") + buf.write("\u041d\3\2\2\2\u041d\u041e\5\u00fb~\2\u041e\u041f\5\u00fb") + 
buf.write("~\2\u041f\u0421\3\2\2\2\u0420\u0416\3\2\2\2\u0420\u041a") + buf.write("\3\2\2\2\u0421\u00fe\3\2\2\2\u0422\u0427\5\u0101\u0081") + buf.write("\2\u0423\u0426\5\u0101\u0081\2\u0424\u0426\5\u0105\u0083") + buf.write("\2\u0425\u0423\3\2\2\2\u0425\u0424\3\2\2\2\u0426\u0429") + buf.write("\3\2\2\2\u0427\u0425\3\2\2\2\u0427\u0428\3\2\2\2\u0428") + buf.write("\u0100\3\2\2\2\u0429\u0427\3\2\2\2\u042a\u042d\5\u0103") + buf.write("\u0082\2\u042b\u042d\5\u00fd\177\2\u042c\u042a\3\2\2\2") + buf.write("\u042c\u042b\3\2\2\2\u042d\u0102\3\2\2\2\u042e\u042f\t") + buf.write("\3\2\2\u042f\u0104\3\2\2\2\u0430\u0431\t\4\2\2\u0431\u0106") + buf.write("\3\2\2\2\u0432\u0434\5\u0109\u0085\2\u0433\u0435\5\u0119") + buf.write("\u008d\2\u0434\u0433\3\2\2\2\u0434\u0435\3\2\2\2\u0435") + buf.write("\u0443\3\2\2\2\u0436\u0438\5\u010b\u0086\2\u0437\u0439") + buf.write("\5\u0119\u008d\2\u0438\u0437\3\2\2\2\u0438\u0439\3\2\2") + buf.write("\2\u0439\u0443\3\2\2\2\u043a\u043c\5\u010d\u0087\2\u043b") + buf.write("\u043d\5\u0119\u008d\2\u043c\u043b\3\2\2\2\u043c\u043d") + buf.write("\3\2\2\2\u043d\u0443\3\2\2\2\u043e\u0440\5\u010f\u0088") + buf.write("\2\u043f\u0441\5\u0119\u008d\2\u0440\u043f\3\2\2\2\u0440") + buf.write("\u0441\3\2\2\2\u0441\u0443\3\2\2\2\u0442\u0432\3\2\2\2") + buf.write("\u0442\u0436\3\2\2\2\u0442\u043a\3\2\2\2\u0442\u043e\3") + buf.write("\2\2\2\u0443\u0108\3\2\2\2\u0444\u044b\5\u0111\u0089\2") + buf.write("\u0445\u0447\7)\2\2\u0446\u0445\3\2\2\2\u0446\u0447\3") + buf.write("\2\2\2\u0447\u0448\3\2\2\2\u0448\u044a\5\u0105\u0083\2") + buf.write("\u0449\u0446\3\2\2\2\u044a\u044d\3\2\2\2\u044b\u0449\3") + buf.write("\2\2\2\u044b\u044c\3\2\2\2\u044c\u010a\3\2\2\2\u044d\u044b") + buf.write("\3\2\2\2\u044e\u0455\7\62\2\2\u044f\u0451\7)\2\2\u0450") + buf.write("\u044f\3\2\2\2\u0450\u0451\3\2\2\2\u0451\u0452\3\2\2\2") + buf.write("\u0452\u0454\5\u0113\u008a\2\u0453\u0450\3\2\2\2\u0454") + buf.write("\u0457\3\2\2\2\u0455\u0453\3\2\2\2\u0455\u0456\3\2\2\2") + 
buf.write("\u0456\u010c\3\2\2\2\u0457\u0455\3\2\2\2\u0458\u0459\7") + buf.write("\62\2\2\u0459\u045d\7z\2\2\u045a\u045b\7\62\2\2\u045b") + buf.write("\u045d\7Z\2\2\u045c\u0458\3\2\2\2\u045c\u045a\3\2\2\2") + buf.write("\u045d\u045e\3\2\2\2\u045e\u0465\5\u0115\u008b\2\u045f") + buf.write("\u0461\7)\2\2\u0460\u045f\3\2\2\2\u0460\u0461\3\2\2\2") + buf.write("\u0461\u0462\3\2\2\2\u0462\u0464\5\u0115\u008b\2\u0463") + buf.write("\u0460\3\2\2\2\u0464\u0467\3\2\2\2\u0465\u0463\3\2\2\2") + buf.write("\u0465\u0466\3\2\2\2\u0466\u010e\3\2\2\2\u0467\u0465\3") + buf.write("\2\2\2\u0468\u0469\7\62\2\2\u0469\u046d\7d\2\2\u046a\u046b") + buf.write("\7\62\2\2\u046b\u046d\7D\2\2\u046c\u0468\3\2\2\2\u046c") + buf.write("\u046a\3\2\2\2\u046d\u046e\3\2\2\2\u046e\u0475\5\u0117") + buf.write("\u008c\2\u046f\u0471\7)\2\2\u0470\u046f\3\2\2\2\u0470") + buf.write("\u0471\3\2\2\2\u0471\u0472\3\2\2\2\u0472\u0474\5\u0117") + buf.write("\u008c\2\u0473\u0470\3\2\2\2\u0474\u0477\3\2\2\2\u0475") + buf.write("\u0473\3\2\2\2\u0475\u0476\3\2\2\2\u0476\u0110\3\2\2\2") + buf.write("\u0477\u0475\3\2\2\2\u0478\u0479\t\5\2\2\u0479\u0112\3") + buf.write("\2\2\2\u047a\u047b\t\6\2\2\u047b\u0114\3\2\2\2\u047c\u047d") + buf.write("\t\7\2\2\u047d\u0116\3\2\2\2\u047e\u047f\t\b\2\2\u047f") + buf.write("\u0118\3\2\2\2\u0480\u0482\5\u011b\u008e\2\u0481\u0483") + buf.write("\5\u011d\u008f\2\u0482\u0481\3\2\2\2\u0482\u0483\3\2\2") + buf.write("\2\u0483\u0491\3\2\2\2\u0484\u0486\5\u011b\u008e\2\u0485") + buf.write("\u0487\5\u011f\u0090\2\u0486\u0485\3\2\2\2\u0486\u0487") + buf.write("\3\2\2\2\u0487\u0491\3\2\2\2\u0488\u048a\5\u011d\u008f") + buf.write("\2\u0489\u048b\5\u011b\u008e\2\u048a\u0489\3\2\2\2\u048a") + buf.write("\u048b\3\2\2\2\u048b\u0491\3\2\2\2\u048c\u048e\5\u011f") + buf.write("\u0090\2\u048d\u048f\5\u011b\u008e\2\u048e\u048d\3\2\2") + buf.write("\2\u048e\u048f\3\2\2\2\u048f\u0491\3\2\2\2\u0490\u0480") + buf.write("\3\2\2\2\u0490\u0484\3\2\2\2\u0490\u0488\3\2\2\2\u0490") + 
buf.write("\u048c\3\2\2\2\u0491\u011a\3\2\2\2\u0492\u0493\t\t\2\2") + buf.write("\u0493\u011c\3\2\2\2\u0494\u0495\t\n\2\2\u0495\u011e\3") + buf.write("\2\2\2\u0496\u0497\7n\2\2\u0497\u049b\7n\2\2\u0498\u0499") + buf.write("\7N\2\2\u0499\u049b\7N\2\2\u049a\u0496\3\2\2\2\u049a\u0498") + buf.write("\3\2\2\2\u049b\u0120\3\2\2\2\u049c\u049e\7)\2\2\u049d") + buf.write("\u049f\5\u0123\u0092\2\u049e\u049d\3\2\2\2\u049f\u04a0") + buf.write("\3\2\2\2\u04a0\u049e\3\2\2\2\u04a0\u04a1\3\2\2\2\u04a1") + buf.write("\u04a2\3\2\2\2\u04a2\u04a3\7)\2\2\u04a3\u04c0\3\2\2\2") + buf.write("\u04a4\u04a5\7w\2\2\u04a5\u04a7\7)\2\2\u04a6\u04a8\5\u0123") + buf.write("\u0092\2\u04a7\u04a6\3\2\2\2\u04a8\u04a9\3\2\2\2\u04a9") + buf.write("\u04a7\3\2\2\2\u04a9\u04aa\3\2\2\2\u04aa\u04ab\3\2\2\2") + buf.write("\u04ab\u04ac\7)\2\2\u04ac\u04c0\3\2\2\2\u04ad\u04ae\7") + buf.write("W\2\2\u04ae\u04b0\7)\2\2\u04af\u04b1\5\u0123\u0092\2\u04b0") + buf.write("\u04af\3\2\2\2\u04b1\u04b2\3\2\2\2\u04b2\u04b0\3\2\2\2") + buf.write("\u04b2\u04b3\3\2\2\2\u04b3\u04b4\3\2\2\2\u04b4\u04b5\7") + buf.write(")\2\2\u04b5\u04c0\3\2\2\2\u04b6\u04b7\7N\2\2\u04b7\u04b9") + buf.write("\7)\2\2\u04b8\u04ba\5\u0123\u0092\2\u04b9\u04b8\3\2\2") + buf.write("\2\u04ba\u04bb\3\2\2\2\u04bb\u04b9\3\2\2\2\u04bb\u04bc") + buf.write("\3\2\2\2\u04bc\u04bd\3\2\2\2\u04bd\u04be\7)\2\2\u04be") + buf.write("\u04c0\3\2\2\2\u04bf\u049c\3\2\2\2\u04bf\u04a4\3\2\2\2") + buf.write("\u04bf\u04ad\3\2\2\2\u04bf\u04b6\3\2\2\2\u04c0\u0122\3") + buf.write("\2\2\2\u04c1\u04c5\n\13\2\2\u04c2\u04c5\5\u0125\u0093") + buf.write("\2\u04c3\u04c5\5\u00fd\177\2\u04c4\u04c1\3\2\2\2\u04c4") + buf.write("\u04c2\3\2\2\2\u04c4\u04c3\3\2\2\2\u04c5\u0124\3\2\2\2") + buf.write("\u04c6\u04ca\5\u0127\u0094\2\u04c7\u04ca\5\u0129\u0095") + buf.write("\2\u04c8\u04ca\5\u012b\u0096\2\u04c9\u04c6\3\2\2\2\u04c9") + buf.write("\u04c7\3\2\2\2\u04c9\u04c8\3\2\2\2\u04ca\u0126\3\2\2\2") + buf.write("\u04cb\u04cc\7^\2\2\u04cc\u04e2\7)\2\2\u04cd\u04ce\7^") + 
buf.write("\2\2\u04ce\u04e2\7$\2\2\u04cf\u04d0\7^\2\2\u04d0\u04e2") + buf.write("\7A\2\2\u04d1\u04d2\7^\2\2\u04d2\u04e2\7^\2\2\u04d3\u04d4") + buf.write("\7^\2\2\u04d4\u04e2\7c\2\2\u04d5\u04d6\7^\2\2\u04d6\u04e2") + buf.write("\7d\2\2\u04d7\u04d8\7^\2\2\u04d8\u04e2\7h\2\2\u04d9\u04da") + buf.write("\7^\2\2\u04da\u04e2\7p\2\2\u04db\u04dc\7^\2\2\u04dc\u04e2") + buf.write("\7t\2\2\u04dd\u04de\7^\2\2\u04de\u04e2\7v\2\2\u04df\u04e0") + buf.write("\7^\2\2\u04e0\u04e2\7x\2\2\u04e1\u04cb\3\2\2\2\u04e1\u04cd") + buf.write("\3\2\2\2\u04e1\u04cf\3\2\2\2\u04e1\u04d1\3\2\2\2\u04e1") + buf.write("\u04d3\3\2\2\2\u04e1\u04d5\3\2\2\2\u04e1\u04d7\3\2\2\2") + buf.write("\u04e1\u04d9\3\2\2\2\u04e1\u04db\3\2\2\2\u04e1\u04dd\3") + buf.write("\2\2\2\u04e1\u04df\3\2\2\2\u04e2\u0128\3\2\2\2\u04e3\u04e4") + buf.write("\7^\2\2\u04e4\u04ef\5\u0113\u008a\2\u04e5\u04e6\7^\2\2") + buf.write("\u04e6\u04e7\5\u0113\u008a\2\u04e7\u04e8\5\u0113\u008a") + buf.write("\2\u04e8\u04ef\3\2\2\2\u04e9\u04ea\7^\2\2\u04ea\u04eb") + buf.write("\5\u0113\u008a\2\u04eb\u04ec\5\u0113\u008a\2\u04ec\u04ed") + buf.write("\5\u0113\u008a\2\u04ed\u04ef\3\2\2\2\u04ee\u04e3\3\2\2") + buf.write("\2\u04ee\u04e5\3\2\2\2\u04ee\u04e9\3\2\2\2\u04ef\u012a") + buf.write("\3\2\2\2\u04f0\u04f1\7^\2\2\u04f1\u04f2\7z\2\2\u04f2\u04f4") + buf.write("\3\2\2\2\u04f3\u04f5\5\u0115\u008b\2\u04f4\u04f3\3\2\2") + buf.write("\2\u04f5\u04f6\3\2\2\2\u04f6\u04f4\3\2\2\2\u04f6\u04f7") + buf.write("\3\2\2\2\u04f7\u012c\3\2\2\2\u04f8\u04fa\5\u012f\u0098") + buf.write("\2\u04f9\u04fb\5\u0131\u0099\2\u04fa\u04f9\3\2\2\2\u04fa") + buf.write("\u04fb\3\2\2\2\u04fb\u04fd\3\2\2\2\u04fc\u04fe\5\u0137") + buf.write("\u009c\2\u04fd\u04fc\3\2\2\2\u04fd\u04fe\3\2\2\2\u04fe") + buf.write("\u0505\3\2\2\2\u04ff\u0500\5\u0135\u009b\2\u0500\u0502") + buf.write("\5\u0131\u0099\2\u0501\u0503\5\u0137\u009c\2\u0502\u0501") + buf.write("\3\2\2\2\u0502\u0503\3\2\2\2\u0503\u0505\3\2\2\2\u0504") + buf.write("\u04f8\3\2\2\2\u0504\u04ff\3\2\2\2\u0505\u012e\3\2\2\2") + 
buf.write("\u0506\u0508\5\u0135\u009b\2\u0507\u0506\3\2\2\2\u0507") + buf.write("\u0508\3\2\2\2\u0508\u0509\3\2\2\2\u0509\u050a\7\60\2") + buf.write("\2\u050a\u050f\5\u0135\u009b\2\u050b\u050c\5\u0135\u009b") + buf.write("\2\u050c\u050d\7\60\2\2\u050d\u050f\3\2\2\2\u050e\u0507") + buf.write("\3\2\2\2\u050e\u050b\3\2\2\2\u050f\u0130\3\2\2\2\u0510") + buf.write("\u0512\7g\2\2\u0511\u0513\5\u0133\u009a\2\u0512\u0511") + buf.write("\3\2\2\2\u0512\u0513\3\2\2\2\u0513\u0514\3\2\2\2\u0514") + buf.write("\u051b\5\u0135\u009b\2\u0515\u0517\7G\2\2\u0516\u0518") + buf.write("\5\u0133\u009a\2\u0517\u0516\3\2\2\2\u0517\u0518\3\2\2") + buf.write("\2\u0518\u0519\3\2\2\2\u0519\u051b\5\u0135\u009b\2\u051a") + buf.write("\u0510\3\2\2\2\u051a\u0515\3\2\2\2\u051b\u0132\3\2\2\2") + buf.write("\u051c\u051d\t\f\2\2\u051d\u0134\3\2\2\2\u051e\u0525\5") + buf.write("\u0105\u0083\2\u051f\u0521\7)\2\2\u0520\u051f\3\2\2\2") + buf.write("\u0520\u0521\3\2\2\2\u0521\u0522\3\2\2\2\u0522\u0524\5") + buf.write("\u0105\u0083\2\u0523\u0520\3\2\2\2\u0524\u0527\3\2\2\2") + buf.write("\u0525\u0523\3\2\2\2\u0525\u0526\3\2\2\2\u0526\u0136\3") + buf.write("\2\2\2\u0527\u0525\3\2\2\2\u0528\u0529\t\r\2\2\u0529\u0138") + buf.write("\3\2\2\2\u052a\u052c\5\u013b\u009e\2\u052b\u052a\3\2\2") + buf.write("\2\u052b\u052c\3\2\2\2\u052c\u052d\3\2\2\2\u052d\u0531") + buf.write("\7$\2\2\u052e\u0530\5\u013d\u009f\2\u052f\u052e\3\2\2") + buf.write("\2\u0530\u0533\3\2\2\2\u0531\u052f\3\2\2\2\u0531\u0532") + buf.write("\3\2\2\2\u0532\u0534\3\2\2\2\u0533\u0531\3\2\2\2\u0534") + buf.write("\u053b\7$\2\2\u0535\u0537\5\u013b\u009e\2\u0536\u0535") + buf.write("\3\2\2\2\u0536\u0537\3\2\2\2\u0537\u0538\3\2\2\2\u0538") + buf.write("\u0539\7T\2\2\u0539\u053b\5\u013f\u00a0\2\u053a\u052b") + buf.write("\3\2\2\2\u053a\u0536\3\2\2\2\u053b\u013a\3\2\2\2\u053c") + buf.write("\u053d\7w\2\2\u053d\u0540\7:\2\2\u053e\u0540\t\16\2\2") + buf.write("\u053f\u053c\3\2\2\2\u053f\u053e\3\2\2\2\u0540\u013c\3") + 
buf.write("\2\2\2\u0541\u0545\n\17\2\2\u0542\u0545\5\u0125\u0093") + buf.write("\2\u0543\u0545\5\u00fd\177\2\u0544\u0541\3\2\2\2\u0544") + buf.write("\u0542\3\2\2\2\u0544\u0543\3\2\2\2\u0545\u013e\3\2\2\2") + buf.write("\u0546\u054a\7$\2\2\u0547\u0549\13\2\2\2\u0548\u0547\3") + buf.write("\2\2\2\u0549\u054c\3\2\2\2\u054a\u054b\3\2\2\2\u054a\u0548") + buf.write("\3\2\2\2\u054b\u054d\3\2\2\2\u054c\u054a\3\2\2\2\u054d") + buf.write("\u0551\7*\2\2\u054e\u0550\13\2\2\2\u054f\u054e\3\2\2\2") + buf.write("\u0550\u0553\3\2\2\2\u0551\u0552\3\2\2\2\u0551\u054f\3") + buf.write("\2\2\2\u0552\u0554\3\2\2\2\u0553\u0551\3\2\2\2\u0554\u0558") + buf.write("\7+\2\2\u0555\u0557\13\2\2\2\u0556\u0555\3\2\2\2\u0557") + buf.write("\u055a\3\2\2\2\u0558\u0559\3\2\2\2\u0558\u0556\3\2\2\2") + buf.write("\u0559\u055b\3\2\2\2\u055a\u0558\3\2\2\2\u055b\u055c\7") + buf.write("$\2\2\u055c\u0140\3\2\2\2\u055d\u055e\5\u0109\u0085\2") + buf.write("\u055e\u055f\5\u0149\u00a5\2\u055f\u056a\3\2\2\2\u0560") + buf.write("\u0561\5\u010b\u0086\2\u0561\u0562\5\u0149\u00a5\2\u0562") + buf.write("\u056a\3\2\2\2\u0563\u0564\5\u010d\u0087\2\u0564\u0565") + buf.write("\5\u0149\u00a5\2\u0565\u056a\3\2\2\2\u0566\u0567\5\u010f") + buf.write("\u0088\2\u0567\u0568\5\u0149\u00a5\2\u0568\u056a\3\2\2") + buf.write("\2\u0569\u055d\3\2\2\2\u0569\u0560\3\2\2\2\u0569\u0563") + buf.write("\3\2\2\2\u0569\u0566\3\2\2\2\u056a\u0142\3\2\2\2\u056b") + buf.write("\u056d\5\u012f\u0098\2\u056c\u056e\5\u0131\u0099\2\u056d") + buf.write("\u056c\3\2\2\2\u056d\u056e\3\2\2\2\u056e\u056f\3\2\2\2") + buf.write("\u056f\u0570\5\u0149\u00a5\2\u0570\u0576\3\2\2\2\u0571") + buf.write("\u0572\5\u0135\u009b\2\u0572\u0573\5\u0131\u0099\2\u0573") + buf.write("\u0574\5\u0149\u00a5\2\u0574\u0576\3\2\2\2\u0575\u056b") + buf.write("\3\2\2\2\u0575\u0571\3\2\2\2\u0576\u0144\3\2\2\2\u0577") + buf.write("\u0578\5\u0139\u009d\2\u0578\u0579\5\u0149\u00a5\2\u0579") + buf.write("\u0146\3\2\2\2\u057a\u057b\5\u0121\u0091\2\u057b\u057c") + 
buf.write("\5\u0149\u00a5\2\u057c\u0148\3\2\2\2\u057d\u057e\5\u00ff") + buf.write("\u0080\2\u057e\u014a\3\2\2\2\u057f\u0581\t\20\2\2\u0580") + buf.write("\u057f\3\2\2\2\u0581\u0582\3\2\2\2\u0582\u0580\3\2\2\2") + buf.write("\u0582\u0583\3\2\2\2\u0583\u0584\3\2\2\2\u0584\u0585\b") + buf.write("\u00a6\3\2\u0585\u014c\3\2\2\2\u0586\u0588\7\17\2\2\u0587") + buf.write("\u0589\7\f\2\2\u0588\u0587\3\2\2\2\u0588\u0589\3\2\2\2") + buf.write("\u0589\u058c\3\2\2\2\u058a\u058c\7\f\2\2\u058b\u0586\3") + buf.write("\2\2\2\u058b\u058a\3\2\2\2\u058c\u058d\3\2\2\2\u058d\u058e") + buf.write("\b\u00a7\3\2\u058e\u014e\3\2\2\2\u058f\u0590\7\61\2\2") + buf.write("\u0590\u0591\7,\2\2\u0591\u0595\3\2\2\2\u0592\u0594\13") + buf.write("\2\2\2\u0593\u0592\3\2\2\2\u0594\u0597\3\2\2\2\u0595\u0596") + buf.write("\3\2\2\2\u0595\u0593\3\2\2\2\u0596\u0598\3\2\2\2\u0597") + buf.write("\u0595\3\2\2\2\u0598\u0599\7,\2\2\u0599\u059a\7\61\2\2") + buf.write("\u059a\u059b\3\2\2\2\u059b\u059c\b\u00a8\3\2\u059c\u0150") + buf.write("\3\2\2\2\u059d\u059e\7\61\2\2\u059e\u059f\7\61\2\2\u059f") + buf.write("\u05a3\3\2\2\2\u05a0\u05a2\n\21\2\2\u05a1\u05a0\3\2\2") + buf.write("\2\u05a2\u05a5\3\2\2\2\u05a3\u05a1\3\2\2\2\u05a3\u05a4") + buf.write("\3\2\2\2\u05a4\u05a6\3\2\2\2\u05a5\u05a3\3\2\2\2\u05a6") + buf.write("\u05a7\b\u00a9\3\2\u05a7\u0152\3\2\2\2G\2\u0157\u015c") + buf.write("\u0161\u0166\u016e\u0420\u0425\u0427\u042c\u0434\u0438") + buf.write("\u043c\u0440\u0442\u0446\u044b\u0450\u0455\u045c\u0460") + buf.write("\u0465\u046c\u0470\u0475\u0482\u0486\u048a\u048e\u0490") + buf.write("\u049a\u04a0\u04a9\u04b2\u04bb\u04bf\u04c4\u04c9\u04e1") + buf.write("\u04ee\u04f6\u04fa\u04fd\u0502\u0504\u0507\u050e\u0512") + buf.write("\u0517\u051a\u0520\u0525\u052b\u0531\u0536\u053a\u053f") + buf.write("\u0544\u054a\u0551\u0558\u0569\u056d\u0575\u0582\u0588") + buf.write("\u058b\u0595\u05a3\4\2\3\2\b\2\2") + return buf.getvalue() + + +class CPP14Lexer(Lexer): + + atn = ATNDeserializer().deserialize(serializedATN()) + + 
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + MultiLineMacro = 1 + Directive = 2 + Alignas = 3 + Alignof = 4 + Asm = 5 + Auto = 6 + Bool = 7 + Break = 8 + Case = 9 + Catch = 10 + Char = 11 + Char16 = 12 + Char32 = 13 + Class = 14 + Const = 15 + Constexpr = 16 + Const_cast = 17 + Continue = 18 + Decltype = 19 + Default = 20 + Delete = 21 + Do = 22 + Double = 23 + Dynamic_cast = 24 + Else = 25 + Enum = 26 + Explicit = 27 + Export = 28 + Extern = 29 + False_ = 30 + Final = 31 + Float = 32 + For = 33 + Friend = 34 + Goto = 35 + If = 36 + Inline = 37 + Int = 38 + Long = 39 + Mutable = 40 + Namespace = 41 + New = 42 + Noexcept = 43 + Nullptr = 44 + Operator = 45 + Override = 46 + Private = 47 + Protected = 48 + Public = 49 + Register = 50 + Reinterpret_cast = 51 + Return = 52 + Short = 53 + Signed = 54 + Sizeof = 55 + Static = 56 + Static_assert = 57 + Static_cast = 58 + Struct = 59 + Switch = 60 + Template = 61 + This = 62 + Thread_local = 63 + Throw = 64 + True_ = 65 + Try = 66 + Typedef = 67 + Typeid = 68 + Typename = 69 + Union = 70 + Unsigned = 71 + Using = 72 + Virtual = 73 + Void = 74 + Volatile = 75 + Wchar = 76 + While = 77 + LeftParen = 78 + RightParen = 79 + LeftBracket = 80 + RightBracket = 81 + LeftBrace = 82 + RightBrace = 83 + Plus = 84 + Minus = 85 + Star = 86 + Div = 87 + Mod = 88 + Caret = 89 + And = 90 + Or = 91 + Tilde = 92 + Not = 93 + Assign = 94 + Less = 95 + Greater = 96 + PlusAssign = 97 + MinusAssign = 98 + StarAssign = 99 + DivAssign = 100 + ModAssign = 101 + XorAssign = 102 + AndAssign = 103 + OrAssign = 104 + LeftShift = 105 + LeftShiftAssign = 106 + Equal = 107 + NotEqual = 108 + LessEqual = 109 + GreaterEqual = 110 + AndAnd = 111 + OrOr = 112 + PlusPlus = 113 + MinusMinus = 114 + Comma = 115 + ArrowStar = 116 + Arrow = 117 + Question = 118 + Colon = 119 + Doublecolon = 120 + Semi = 121 + Dot = 122 + DotStar = 123 + Ellipsis = 124 + Identifier = 125 + Integerliteral = 126 + Decimalliteral = 127 + 
Octalliteral = 128 + Hexadecimalliteral = 129 + Binaryliteral = 130 + Integersuffix = 131 + Characterliteral = 132 + Floatingliteral = 133 + Stringliteral = 134 + Userdefinedintegerliteral = 135 + Userdefinedfloatingliteral = 136 + Userdefinedstringliteral = 137 + Userdefinedcharacterliteral = 138 + Whitespace = 139 + Newline = 140 + BlockComment = 141 + LineComment = 142 + + channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ] + + modeNames = [ "DEFAULT_MODE" ] + + literalNames = [ "", + "'alignas'", "'alignof'", "'asm'", "'auto'", "'bool'", "'break'", + "'case'", "'catch'", "'char'", "'char16_t'", "'char32_t'", "'class'", + "'const'", "'constexpr'", "'const_cast'", "'continue'", "'decltype'", + "'default'", "'delete'", "'do'", "'double'", "'dynamic_cast'", + "'else'", "'enum'", "'explicit'", "'export'", "'extern'", "'false'", + "'final'", "'float'", "'for'", "'friend'", "'goto'", "'if'", + "'inline'", "'int'", "'long'", "'mutable'", "'namespace'", "'new'", + "'noexcept'", "'nullptr'", "'operator'", "'override'", "'private'", + "'protected'", "'public'", "'register'", "'reinterpret_cast'", + "'return'", "'short'", "'signed'", "'sizeof'", "'static'", "'static_assert'", + "'static_cast'", "'struct'", "'switch'", "'template'", "'this'", + "'thread_local'", "'throw'", "'true'", "'try'", "'typedef'", + "'typeid'", "'typename'", "'union'", "'unsigned'", "'using'", + "'virtual'", "'void'", "'volatile'", "'wchar_t'", "'while'", + "'('", "')'", "'['", "']'", "'{'", "'}'", "'+'", "'-'", "'*'", + "'/'", "'%'", "'^'", "'&'", "'|'", "'~'", "'!'", "'='", "'<'", + "'>'", "'+='", "'-='", "'*='", "'/='", "'%='", "'^='", "'&='", + "'|='", "'<<'", "'<<='", "'=='", "'!='", "'<='", "'>='", "'&&'", + "'||'", "'++'", "'--'", "','", "'->*'", "'->'", "'?'", "':'", + "'::'", "';'", "'.'", "'.*'", "'...'" ] + + symbolicNames = [ "", + "MultiLineMacro", "Directive", "Alignas", "Alignof", "Asm", + "Auto", "Bool", "Break", "Case", "Catch", "Char", "Char16", + "Char32", "Class", "Const", 
"Constexpr", "Const_cast", "Continue", + "Decltype", "Default", "Delete", "Do", "Double", "Dynamic_cast", + "Else", "Enum", "Explicit", "Export", "Extern", "False_", "Final", + "Float", "For", "Friend", "Goto", "If", "Inline", "Int", "Long", + "Mutable", "Namespace", "New", "Noexcept", "Nullptr", "Operator", + "Override", "Private", "Protected", "Public", "Register", "Reinterpret_cast", + "Return", "Short", "Signed", "Sizeof", "Static", "Static_assert", + "Static_cast", "Struct", "Switch", "Template", "This", "Thread_local", + "Throw", "True_", "Try", "Typedef", "Typeid", "Typename", "Union", + "Unsigned", "Using", "Virtual", "Void", "Volatile", "Wchar", + "While", "LeftParen", "RightParen", "LeftBracket", "RightBracket", + "LeftBrace", "RightBrace", "Plus", "Minus", "Star", "Div", "Mod", + "Caret", "And", "Or", "Tilde", "Not", "Assign", "Less", "Greater", + "PlusAssign", "MinusAssign", "StarAssign", "DivAssign", "ModAssign", + "XorAssign", "AndAssign", "OrAssign", "LeftShift", "LeftShiftAssign", + "Equal", "NotEqual", "LessEqual", "GreaterEqual", "AndAnd", + "OrOr", "PlusPlus", "MinusMinus", "Comma", "ArrowStar", "Arrow", + "Question", "Colon", "Doublecolon", "Semi", "Dot", "DotStar", + "Ellipsis", "Identifier", "Integerliteral", "Decimalliteral", + "Octalliteral", "Hexadecimalliteral", "Binaryliteral", "Integersuffix", + "Characterliteral", "Floatingliteral", "Stringliteral", "Userdefinedintegerliteral", + "Userdefinedfloatingliteral", "Userdefinedstringliteral", "Userdefinedcharacterliteral", + "Whitespace", "Newline", "BlockComment", "LineComment" ] + + ruleNames = [ "MultiLineMacro", "Directive", "Alignas", "Alignof", "Asm", + "Auto", "Bool", "Break", "Case", "Catch", "Char", "Char16", + "Char32", "Class", "Const", "Constexpr", "Const_cast", + "Continue", "Decltype", "Default", "Delete", "Do", "Double", + "Dynamic_cast", "Else", "Enum", "Explicit", "Export", + "Extern", "False_", "Final", "Float", "For", "Friend", + "Goto", "If", "Inline", "Int", "Long", 
"Mutable", "Namespace", + "New", "Noexcept", "Nullptr", "Operator", "Override", + "Private", "Protected", "Public", "Register", "Reinterpret_cast", + "Return", "Short", "Signed", "Sizeof", "Static", "Static_assert", + "Static_cast", "Struct", "Switch", "Template", "This", + "Thread_local", "Throw", "True_", "Try", "Typedef", "Typeid", + "Typename", "Union", "Unsigned", "Using", "Virtual", "Void", + "Volatile", "Wchar", "While", "LeftParen", "RightParen", + "LeftBracket", "RightBracket", "LeftBrace", "RightBrace", + "Plus", "Minus", "Star", "Div", "Mod", "Caret", "And", + "Or", "Tilde", "Not", "Assign", "Less", "Greater", "PlusAssign", + "MinusAssign", "StarAssign", "DivAssign", "ModAssign", + "XorAssign", "AndAssign", "OrAssign", "LeftShift", "LeftShiftAssign", + "Equal", "NotEqual", "LessEqual", "GreaterEqual", "AndAnd", + "OrOr", "PlusPlus", "MinusMinus", "Comma", "ArrowStar", + "Arrow", "Question", "Colon", "Doublecolon", "Semi", "Dot", + "DotStar", "Ellipsis", "Hexquad", "Universalcharactername", + "Identifier", "Identifiernondigit", "NONDIGIT", "DIGIT", + "Integerliteral", "Decimalliteral", "Octalliteral", "Hexadecimalliteral", + "Binaryliteral", "NONZERODIGIT", "OCTALDIGIT", "HEXADECIMALDIGIT", + "BINARYDIGIT", "Integersuffix", "Unsignedsuffix", "Longsuffix", + "Longlongsuffix", "Characterliteral", "Cchar", "Escapesequence", + "Simpleescapesequence", "Octalescapesequence", "Hexadecimalescapesequence", + "Floatingliteral", "Fractionalconstant", "Exponentpart", + "SIGN", "Digitsequence", "Floatingsuffix", "Stringliteral", + "Encodingprefix", "Schar", "Rawstring", "Userdefinedintegerliteral", + "Userdefinedfloatingliteral", "Userdefinedstringliteral", + "Userdefinedcharacterliteral", "Udsuffix", "Whitespace", + "Newline", "BlockComment", "LineComment" ] + + grammarFileName = "CPP14.g4" + + def __init__(self, input=None, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.7.1") + self._interp = LexerATNSimulator(self, self.atn, 
self.decisionsToDFA, PredictionContextCache()) + self._actions = None + self._predicates = None + + diff --git a/persper/analytics/lsp_graph_server/jsonrpcutils.py b/persper/analytics/lsp_graph_server/jsonrpcutils.py new file mode 100644 index 00000000000..c611654b64b --- /dev/null +++ b/persper/analytics/lsp_graph_server/jsonrpcutils.py @@ -0,0 +1,43 @@ +import json +from datetime import datetime + +from jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter + + +class JsonRpcLogger(): + def __init__(self, fileName): + self._fileName = fileName + self._file = open(fileName, "wt") + + def logTX(self, message: dict): + self._file.write("{0} < {1}\n".format(datetime.now(), json.dumps(message))) + if message.get("method", None) == "shutdown": + self._file.flush() + + def logRX(self, message: dict): + self._file.write("{0} > {1}\n".format(datetime.now(), json.dumps(message))) + + def __exit__(self, exc_type, exc_value, traceback): + self._file.close() + + +class LoggedJsonRpcStreamReader(JsonRpcStreamReader): + def __init__(self, rfile, logger: JsonRpcLogger): + super().__init__(rfile) + self._logger = logger + + def listen(self, message_consumer): + def wrapper(message): + self._logger.logRX(message) + message_consumer(message) + super().listen(wrapper) + + +class LoggedJsonRpcStreamWriter(JsonRpcStreamWriter): + def __init__(self, wfile, logger: JsonRpcLogger, **json_dumps_args): + super().__init__(wfile, **json_dumps_args) + self._logger = logger + + def write(self, message): + self._logger.logTX(message) + super().write(message) diff --git a/persper/analytics/lsp_graph_server/languageclient/__init__.py b/persper/analytics/lsp_graph_server/languageclient/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/persper/analytics/lsp_graph_server/languageclient/lspclient.py b/persper/analytics/lsp_graph_server/languageclient/lspclient.py new file mode 100644 index 00000000000..c0abd633d75 --- /dev/null +++ 
b/persper/analytics/lsp_graph_server/languageclient/lspclient.py @@ -0,0 +1,76 @@ +""" +LSP client implementation. +""" +import logging +import threading + +from jsonrpc.dispatchers import MethodDispatcher +from jsonrpc.endpoint import Endpoint +from jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter + +from persper.analytics.lsp_graph_server.languageclient.lspcontract import MessageType, Registration +from persper.analytics.lsp_graph_server.languageclient.lspserver import LspServerStub +from persper.analytics.lsp_graph_server.jsonrpcutils import LoggedJsonRpcStreamReader, LoggedJsonRpcStreamWriter, JsonRpcLogger + +_logger = logging.getLogger(__name__) + + +class LspClient(MethodDispatcher): + def __init__(self, rx, tx, logFile: str = None): + super().__init__() + self._rpclogger = JsonRpcLogger(logFile) if logFile else None + self._streamReader = LoggedJsonRpcStreamReader(rx, self._rpclogger) if logFile else JsonRpcStreamReader(rx) + self._streamWriter = LoggedJsonRpcStreamWriter(tx, self._rpclogger) if logFile else JsonRpcStreamWriter(tx) + self._nextJsonRpcMessageId = 0 + # Some language server, e.g. cquery, only supports numerical request Ids. 
+ self._endpoint = Endpoint(self, self._streamWriter.write, self.nextJsonRpcMessageId) + self._listenerThread = None + self._shutdownEvent = threading.Event() + self._serverStub = LspServerStub(self._endpoint) + + def nextJsonRpcMessageId(self): + self._nextJsonRpcMessageId += 1 + if self._nextJsonRpcMessageId >= 0x7FFFFFFF: + self._nextJsonRpcMessageId = 0 + return str(self._nextJsonRpcMessageId) + + def start(self): + self._listenerThread = threading.Thread(target=self._startListener, daemon=True) + self._listenerThread.start() + + def stop(self): + self._endpoint.shutdown() + self._streamReader.close() + self._streamWriter.close() + self._shutdownEvent.set() + self._listenerThread.join(timeout=30) + + def initializeServer(self): + raise NotImplementedError() + + @property + def server(self): + return self._serverStub + + def _startListener(self): + self._streamReader.listen(self._endpoint.consume) + + def m_window__show_message(self, type: MessageType, message: str): + type = MessageType(type) + _logger.info(type, message) + + def m_window__show_message_request(self, type: MessageType, message: str, actions): + type = MessageType(type) + print(type, message, actions) + return actions[0]["title"] + + def m_window__log_message(self, type: MessageType, message: str): + type = MessageType(type) + _logger.info(type, message) + + def m_text_document__publish_diagnostics(self, uri: str, diagnostics): + # ignore all diagnostic information for now. 
+ pass + + def m_client__register_capability(self, registrations: list): + regs = [Registration.fromDict(r) for r in registrations] diff --git a/persper/analytics/lsp_graph_server/languageclient/lspcontract.py b/persper/analytics/lsp_graph_server/languageclient/lspcontract.py new file mode 100644 index 00000000000..390f0f0da42 --- /dev/null +++ b/persper/analytics/lsp_graph_server/languageclient/lspcontract.py @@ -0,0 +1,356 @@ +import pathlib +from enum import Enum +from typing import Tuple, Union +import logging +import os + +_logger = logging.getLogger(__name__) + + +class MessageType(Enum): + Error = 1 + Warning = 2 + Info = 3 + Log = 4 + + +class SymbolKind(Enum): + Unknown = 0 + File = 1 + Module = 2 + Namespace = 3 + Package = 4 + Class = 5 + Method = 6 + Property = 7 + Field = 8 + Constructor = 9 + Enum = 10 + Interface = 11 + Function = 12 + Variable = 13 + Constant = 14 + String = 15 + Number = 16 + Boolean = 17 + Array = 18 + Object = 19 + Key = 20 + Null = 21 + EnumMember = 22 + Struct = 23 + Event = 24 + Operator = 25 + TypeParameter = 26 + + # cquery extensions + # See also https://github.com/Microsoft/language-server-protocol/issues/344 + # for new SymbolKind clang/Index/IndexSymbol.h clang::index::SymbolKind + TypeAlias = 252 + Parameter = 253 + StaticMethod = 254 + Macro = 255 + + +class CompletionItemKind(Enum): + Text = 1 + Method = 2 + Function = 3 + Constructor = 4 + Field = 5 + Variable = 6 + Class = 7 + Interface = 8 + Module = 9 + Property = 10 + Unit = 11 + Value = 12 + Enum = 13 + Keyword = 14 + Snippet = 15 + Color = 16 + File = 17 + Reference = 18 + Folder = 19 + EnumMember = 20 + Constant = 21 + Struct = 22 + Event = 23 + Operator = 24 + TypeParameter = 25 + + +class LspContractObject: + def __init__(self): + pass + + def __repr__(self): + return self.__str__() + + +class Position(LspContractObject): + """ + Line position in a document (zero-based). 
+ """ + + def __init__(self, line: int, character: int): + self.line = line + self.character = character + + def __str__(self): + return str(self.line) + "," + str(self.character) + + def __eq__(self, other: "Position"): + return self.line == other.line and self.character == other.character + + def __ne__(self, other: "Position"): + return self.line != other.line or self.character != other.character + + def __le__(self, other: "Position"): + return self.line < other.line or self.line == other.line and self.character <= other.character + + def __lt__(self, other: "Position"): + return self.line < other.line or self.line == other.line and self.character < other.character + + def toTuple(self): + return (self.line, self.character) + + def toDict(self): + return {"line": self.line, "character": self.character} + + @staticmethod + def fromDict(d: dict): + return Position(d["line"], d["character"]) + + @staticmethod + def parse(expr: Union[Tuple[int, int], "Position"]): + if isinstance(expr, Position): + return expr + if isinstance(expr, (list, tuple)): + return Position(expr[0], expr[1]) + raise TypeError("Invalid expr type.") + + +class Range(LspContractObject): + """ + A range in a text document expressed as (zero-based) start and end positions. + """ + + def __init__(self, start: Position, end: Position): + self.start = start + self.end = end + + def __str__(self): + return str(self.start) + "-" + str(self.end) + + def toDict(self): + return {"start": self.start.toDict(), + "end": self.end.toDict()} + + @staticmethod + def fromDict(d: dict): + return Range(Position.fromDict(d["start"]), Position.fromDict(d["end"])) + + +class Location(LspContractObject): + """ + Represents a location inside a resource, such as a line inside a text file. 
+ """ + + def __init__(self, uri: str, range: Range): + self.uri = uri + self.range = range + + def __str__(self): + return str(self.uri) + ":" + str(self.range) + + def toDict(self): + return {"uri": self.uri, "range": self.range.toDict()} + + @staticmethod + def fromDict(d: dict): + return Location(d["uri"], Range.fromDict(d["range"])) + + +class TextDocument(LspContractObject): + """ + An item to transfer a text document from the client to the server. + """ + + def __init__(self, uri: str, languageId: str, version: int, text: str): + self.uri = uri + self.languageId = languageId + self.version = version + self.text = text + + def __str__(self): + return str.format("{0};{1};[{2}]", self.uri, self.languageId, self.version) + + def toDict(self): + return {"uri": self.uri, "languageId": self.languageId, "version": self.version, "text": self.text} + + @staticmethod + def fromDict(d: dict): + return TextDocument(d["uri"], d["languageId"], d["version"], d["text"]) + + @staticmethod + def loadFile(fileName: str, languageId: str, version: int = 1): + content = None + try: + with open(fileName, "rt", encoding="utf-8", errors="replace") as file: + content = file.read() + return TextDocument(TextDocument.fileNameToUri(fileName), languageId, version, content) + except Exception as ex: + raise ValueError("Cannot load from {0}.".format(fileName)) from ex + + @staticmethod + def fileNameToUri(fileName: str): + return pathlib.Path(fileName).as_uri() + + +class DocumentSymbol(LspContractObject): + """ + Represents programming constructs like variables, classes, interfaces etc. that appear in a document. Document symbols can be + hierarchical and they have two ranges: one that encloses its definition and one that points to its most interesting range, + e.g. the range of an identifier. 
+ """ + + def __init__(self, name: str, detail: str, kind: SymbolKind, deprecated: bool, range: Range, selectionRange: Range, children: list): + self.name = name + self.detail = detail + self.kind = kind + self.deprecated = deprecated + self.range = range + """ + The range enclosing this symbol not including leading/trailing whitespace but everything else + like comments. This information is typically used to determine if the clients cursor is + inside the symbol to reveal in the symbol in the UI. + """ + self.selectionRange = selectionRange + """ + The range that should be selected and revealed when this symbol is being picked, e.g the name of a function. + Must be contained by the `range`. + """ + self.children = list(children) + + def getSymbolRange(self): + return self.selectionRange + + def __str__(self): + return str.format("{0}({1}){2}", self.name, self.kind, self.children or "") + + def toDict(self): + raise NotImplementedError() + + @staticmethod + def fromDict(d: dict): + children = () + if d.get("children"): + children = (DocumentSymbol.fromDict(cd) for cd in d["children"]) + return DocumentSymbol(d["name"], d.get("detail"), SymbolKind(d["kind"] if d["kind"] else 0), + d.get("deprecated"), Range.fromDict(d["range"]), Range.fromDict(d["selectionRange"]), + children) + + +class SymbolInformation(LspContractObject): + """ + Represents information about programming constructs like variables, classes, + interfaces etc. 
+ """ + + def __init__(self, name: str, kind: SymbolKind, deprecated: bool, location: Location, containerName: str): + self.name = name + self.kind = kind + self.deprecated = deprecated + self.location = location + self.containerName = containerName + + def getSymbolRange(self): + return self.location.range + + def __str__(self): + return self.name + "[" + self.kind + "]" + + def toDict(self): + raise NotImplementedError() + + @staticmethod + def fromDict(d: dict): + try: + return SymbolInformation(d["name"], SymbolKind(d["kind"]) if d["kind"] else None, + d.get("deprecated"), Location.fromDict(d["location"]), + d.get("containerName")) + except Exception as ex: + raise ValueError("Invalid input: {0}.".format(d)) from ex + + +class TextDocumentContentChangeEvent(LspContractObject): + """ + An event describing a change to a text document. If range and rangeLength are omitted + the new text is considered to be the full content of the document. + """ + + def __init__(self, text: str, range: Range = None, rangeLength: int = None): + self.text = text + self.range = range + self.rangeLength = rangeLength + + def toDict(self): + d = {"text": self.text} + if self.range is not None: + d["range"] = self.range + if self.rangeLength is not None: + d["rangeLength"] = self.rangeLength + return d + + +class TextDocumentSaveReason(Enum): + """ + Represents reasons why a text document is saved. + """ + Manual = 1 + AfterDelay = 2 + FocusOut = 3 + + +class FileChangeType(Enum): + """The file event type.""" + Created = 1 + Changed = 2 + Deleted = 3 + + +class FileEvent(LspContractObject): + """ + An event describing a file change. + """ + + def __init__(self, uri: str, type: FileChangeType): + self.uri = uri + self.type = type + + def toDict(self): + d = {"uri": self.uri, "type": self.type.value} + return d + + +class Registration(LspContractObject): + """ + Represents information about programming constructs like variables, classes, + interfaces etc. 
+ """ + + def __init__(self, id: str, method: str, registerOptions: dict): + self.id = id + self.method = method + self.registerOptions = registerOptions + + def __str__(self): + return self.id + + def toDict(self): + raise NotImplementedError() + + @staticmethod + def fromDict(d: dict): + return Registration(d["id"], d["method"], d.get("registerOptions", None)) diff --git a/persper/analytics/lsp_graph_server/languageclient/lspserver.py b/persper/analytics/lsp_graph_server/languageclient/lspserver.py new file mode 100644 index 00000000000..72dac067891 --- /dev/null +++ b/persper/analytics/lsp_graph_server/languageclient/lspserver.py @@ -0,0 +1,205 @@ +""" +LSP server contracts. +""" +import asyncio +import os +from collections.abc import Iterable +from pathlib import Path +from typing import Iterable, List, Tuple, Union + +from jsonrpc.endpoint import Endpoint + +from .lspcontract import (DocumentSymbol, FileEvent, Location, Position, + SymbolInformation, TextDocument, + TextDocumentContentChangeEvent, + TextDocumentSaveReason) + +DEFAULT_CAPABILITIES = { + "workspace": { + "applyEdit": False, + "workspaceEdit": { + "documentChanges": True + }, + "didChangeConfiguration": { + "dynamicRegistration": True + }, + "didChangeWatchedFiles": { + "dynamicRegistration": True + }, + "symbol": { + "dynamicRegistration": True, + "symbolKind": { + "valueSet": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + } + }, + "executeCommand": { + "dynamicRegistration": True + }, + "configuration": True, + "workspaceFolders": False + }, + "textDocument": { + "publishDiagnostics": None, + "synchronization": { + "dynamicRegistration": True, + "willSave": True, + "willSaveWaitUntil": True, + "didSave": True + }, + "completion": { + "dynamicRegistration": True, + "contextSupport": True, + "completionItem": { + "snippetSupport": True, + "commitCharactersSupport": True, + "documentationFormat": ["markdown", "plaintext"] + }, + 
"completionItemKind": { + "valueSet": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + } + }, + "hover": { + "dynamicRegistration": True, + "contentFormat": ["markdown", "plaintext"] + }, + "signatureHelp": { + "dynamicRegistration": True, + "signatureInformation": { + "documentationFormat": ["markdown", "plaintext"] + } + }, + "definition": { + "dynamicRegistration": True + }, + "references": { + "dynamicRegistration": True + }, + "documentHighlight": { + "dynamicRegistration": True + }, + "documentSymbol": { + "dynamicRegistration": True, + "symbolKind": { + "valueSet": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + }, + "hierarchicalDocumentSymbolSupport": True + }, + "codeAction": { + "dynamicRegistration": True + }, + "codeLens": { + "dynamicRegistration": True + }, + "formatting": { + "dynamicRegistration": True + }, + "rangeFormatting": { + "dynamicRegistration": True + }, + "onTypeFormatting": { + "dynamicRegistration": True + }, + "rename": { + "dynamicRegistration": True + }, + "documentLink": { + "dynamicRegistration": True + }, + "typeDefinition": { + "dynamicRegistration": True + }, + "implementation": { + "dynamicRegistration": True + }, + "colorProvider": { + "dynamicRegistration": True + } + }} + + +class LspServerStub(): + def __init__(self, endpoint: Endpoint): + if not isinstance(endpoint, Endpoint): + raise TypeError("Expect Endpoint instance.") + self._endpoint = endpoint + + def request(self, method, params=None): + return asyncio.wrap_future(self._endpoint.request(method, params)) + + def notify(self, method, params=None): + self._endpoint.notify(method, params) + + def initialize(self, processId=None, rootFolder=None, initializationOptions=None, capabilities=None): + if processId is None: + processId = os.getpid() + cap = DEFAULT_CAPABILITIES.copy() + if capabilities: + cap.update(capabilities) + rootUri = Path(rootFolder).as_uri() + return 
self.request("initialize", { + "processId": processId, + "rootUri": rootUri, + "initializationOptions": initializationOptions, + "capabilities": cap + }) + + def initialized(self): + self.notify("initialized") + + def shutdown(self): + return self.request("shutdown") + + def exit(self): + self.notify("exit") + + def textDocumentDidOpen(self, document: TextDocument): + self.notify("textDocument/didOpen", {"textDocument": document.toDict()}) + + def textDocumentDidClose(self, documentUri: str): + self.notify("textDocument/didClose", {"textDocument": {"uri": documentUri}}) + + async def textDocumentGotoDefinition(self, documentUri: str, position: Union[Tuple[int, int], Position]): + result = await self.request( + "textDocument/definition", + { + "textDocument": {"uri": documentUri}, + "position": Position.parse(position).toDict() + } + ) + if not result: + return [] + if isinstance(result, Iterable): + return [Location.fromDict(r) for r in result] + return [Location.fromDict(result)] + + async def textDocumentGetSymbols(self, documentUri: str) -> List[DocumentSymbol]: + result = await self.request("textDocument/documentSymbol", {"textDocument": {"uri": documentUri}}) + if not result: + return [] + + def fromDict(d: dict): + if "location" in d: + return SymbolInformation.fromDict(d) + return DocumentSymbol.fromDict(d) + + return [fromDict(d) for d in result] + + def textDocumentDidChange(self, documentUri: str, documentVersion: int, contentChanges: Iterable[TextDocumentContentChangeEvent]): + self.notify("textDocument/didChange", {"textDocument": {"uri": documentUri, "version": documentVersion}, + "contentChanges": [c.toDict() for c in contentChanges]}) + + def textDocumentWillSave(self, documentUri: str, reason: TextDocumentSaveReason = TextDocumentSaveReason.Manual): + self.notify("textDocument/willSave", {"textDocument": {"uri": documentUri}, + "reason": reason.value}) + + def textDocumentDidSave(self, documentUri: str, text: str = None): + 
self.notify("textDocument/didSave", {"textDocument": {"uri": documentUri}, + "text": text}) + + async def textDocumentCodeLens(self, documentUri: str): + result = await self.request("textDocument/codeLens", {"textDocument": {"uri": documentUri}}) + # We call this method only to synchronize the time sequence + return result + + def workspaceDidChangeWatchedFiles(self, changes: Iterable[FileEvent]): + self.notify("workspace/didChangeWatchedFiles", {"changes": [c.toDict() for c in changes]}) diff --git a/persper/analytics/lsp_graph_server/main.py b/persper/analytics/lsp_graph_server/main.py new file mode 100644 index 00000000000..5e05e2cf00a --- /dev/null +++ b/persper/analytics/lsp_graph_server/main.py @@ -0,0 +1,86 @@ +import asyncio +import logging +import subprocess +from os import path, sys + +from callgraph.manager import CallGraphManager +from ccls import CclsCallGraphBuilder, CclsLspClient + +# Thus you need to place cquery in rootfolder/bin/cquery, and execute ./src/main.py in root folder. 
+LANGUAGE_SERVER_COMMAND = "./bin/cquery --record cquerystd --log-file cquery.log --ci" # --log-all-to-stderr" +LANGUAGE_SERVER_COMMAND = "./bin/ccls -log-file=ccls.log" +SOURCE_ROOT = "./demoroot/cpp/" +CACHE_ROOT = "./demoroot/cache/" +# SOURCE_ROOT = "./demoroot/cpp-simple/" +ENTRYPOINT_PATTERN = path.join(SOURCE_ROOT, "./Eigen/src/Cholesky/*.h") +JSON_RPC_DUMP_PATH = "rpctrace.txt" + +logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(name)s] %(message)s', + level=logging.INFO) +logging.getLogger('asyncio').setLevel(logging.CRITICAL) + + +async def main(): + logger = logging.getLogger() + # an simple approach to let user change entrypoint file from commandline arguments + global ENTRYPOINT_PATTERN, SOURCE_ROOT + if (len(sys.argv) == 2): + ENTRYPOINT_PATTERN = sys.argv[1] + elif (len(sys.argv) == 3): + SOURCE_ROOT, ENTRYPOINT_PATTERN = sys.argv[1], sys.argv[2] + with subprocess.Popen(LANGUAGE_SERVER_COMMAND, stdin=subprocess.PIPE, stdout=subprocess.PIPE, + creationflags=subprocess.CREATE_NEW_CONSOLE) as serverProc: + try: + logger.info("Started language server with PID: %d.", serverProc.pid) + client = CclsLspClient(serverProc.stdout, serverProc.stdin, JSON_RPC_DUMP_PATH) + client.start() + logger.info(await client.server.initialize( + rootFolder=path.abspath(SOURCE_ROOT), + initializationOptions={"cacheDirectory": path.abspath(CACHE_ROOT), + "diagnostics": {"onParse": False, "onType": False}, + "discoverSystemIncludes": True, + "enableCacheRead": True, + "enableCacheWrite": True, + "progressReportFrequencyMs": 500, + "clang": { + "excludeArgs": [], + "extraArgs": ["-nocudalib"], + "pathMappings": [], + "resourceDir": "" + } + })) + client.server.initialized() + + builder = CclsCallGraphBuilder(client) + builder.workspaceFilePatterns = [path.abspath(path.join(SOURCE_ROOT, "/**/*"))] + manager = CallGraphManager(builder) + await manager.buildGraph(ENTRYPOINT_PATTERN) + manager.graph.dumpTo("graph.txt") + + testFile = path.join(SOURCE_ROOT, "Utility.h") + 
if path.exists(testFile): + for i in range(2): + logger.info("Now we are replacing Utility.h with a single line of content.") + oldContent = builder.modifyFile(testFile, "// Removed file content.") + await manager.rebuildGraph([testFile]) + manager.graph.dumpTo("graph1.txt") + logger.info("Now we are restoring Utility.h.") + builder.modifyFile(testFile, oldContent) + await manager.rebuildGraph([testFile]) + manager.graph.dumpTo("graph2.txt") + + logger.info("Shutting down language server...") + await asyncio.wait_for(client.server.shutdown(), 10) + client.server.exit() + logger.info("Language server exited with code: %s.", serverProc.wait(10)) + finally: + if serverProc.returncode is None: + # kill server process to avoid infinite wait in Popen.__exit__ + serverProc.kill() + logger.warning("Killed language server.") + + +if __name__ == '__main__': + loop = asyncio.new_event_loop() + loop.set_debug(True) + loop.run_until_complete(main()) diff --git a/persper/analytics/lsp_graph_server/setup.cfg b/persper/analytics/lsp_graph_server/setup.cfg new file mode 100644 index 00000000000..68859ad034c --- /dev/null +++ b/persper/analytics/lsp_graph_server/setup.cfg @@ -0,0 +1,2 @@ +[pep8] +max-line-length = 120 diff --git a/persper/analytics/lsp_graph_server/wildcards.py b/persper/analytics/lsp_graph_server/wildcards.py new file mode 100644 index 00000000000..7c0261c3a8e --- /dev/null +++ b/persper/analytics/lsp_graph_server/wildcards.py @@ -0,0 +1,47 @@ +import os +import re + + +def translate(pat): + """ + Translate a shell PATTERN to a regular expression. + There is no way to quote meta-characters. + This version can handle **/ pattern properly, compared with fnmatch. + """ + + i, n = 0, len(pat) + res = '' + while i < n: + c = pat[i] + i = i + 1 + if c == '*': + if i < n and pat[i] == '*': + res = res + '.*?' + i = i + 1 + if i < n and pat[i] == os.sep: + i = i + 1 + else: + res = res + r'[^\\/]+' + elif c == '?': + res = res + '.' 
+ elif c == '[': + j = i + if j < n and pat[j] == '!': + j = j + 1 + if j < n and pat[j] == ']': + j = j + 1 + while j < n and pat[j] != ']': + j = j + 1 + if j >= n: + res = res + '\\[' + else: + stuff = pat[i:j].replace('\\', '\\\\') + i = j + 1 + if stuff[0] == '!': + stuff = '^' + stuff[1:] + elif stuff[0] == '^': + stuff = '\\' + stuff + res = '%s[%s]' % (res, stuff) + else: + res = res + re.escape(c) + return '(?ms)' + res + '$' diff --git a/persper/analytics/patch_parser.py b/persper/analytics/patch_parser.py index e3d9e116d06..03094f614be 100644 --- a/persper/analytics/patch_parser.py +++ b/persper/analytics/patch_parser.py @@ -69,6 +69,9 @@ def parse(self, text): self.start_add() else: self.start_add() + elif line.startswith('\\'): + # Ignore \No newline at the end of file + pass else: # print("in blank") if self.in_add: diff --git a/persper/analytics/processor.py b/persper/analytics/processor.py deleted file mode 100644 index df8e2842c70..00000000000 --- a/persper/analytics/processor.py +++ /dev/null @@ -1,375 +0,0 @@ -import os -import time -import pickle -from persper.analytics.git_tools import initialize_repo -from collections import deque -import functools -print = functools.partial(print, flush=True) - -EMPTY_TREE_SHA = '4b825dc642cb6eb9a060e54bf8d69288fbee4904' - - -def _diff_with_first_parent(commit): - if len(commit.parents) == 0: - prev_commit = EMPTY_TREE_SHA - else: - prev_commit = commit.parents[0] - # commit.diff automatically detect renames - return commit.diff(prev_commit, - create_patch=True, R=True, indent_heuristic=True) - - -def _fill_change_type(diff_index): - for diff in diff_index: - if diff.new_file: - diff.change_type = 'A' - elif diff.deleted_file: - diff.change_type = 'D' - elif diff.renamed: - diff.change_type = 'R' - elif (diff.a_blob and diff.b_blob and - (diff.a_blob != diff.b_blob)): - diff.change_type = 'M' - else: - diff.change_type = 'U' - - -def _print_diff_index(diff_index): - print(" ".join([diff.change_type for diff 
in diff_index])) - - -def _subject(msg): - return msg.split('\n', 1)[0].lstrip().rstrip() - - -class Processor(): - - def __init__(self, repo_path): - self.repo_path = repo_path - self.repo = initialize_repo(repo_path) - self.visited = set() - self.last_processed_commit = None - - def process(self, rev=None, - from_beginning=False, num_commits=None, - from_last_processed=False, end_commit_sha=None, - into_branches=False, - max_branch_length=100, - min_branch_date=None, - checkpoint_interval=100, - skip_work=False, - verbose=True): - """ - This function supports four ways of specifying the - range of commits to process: - - Method 1: rev - Pass `rev` parameter and set both - `from_beginning` and `from_last_processed` to False. - `rev` is the revision specifier which follows - an extended SHA-1 syntax. Please refer to git-rev-parse - for viable options. `rev' should only include commits - on the master branch. - - Method 2: from_beginning & num_commits (optional) - Set `from_beginning` to True and - pass `num_commits` parameter. Using this - method, the function will start from the - very first commit on the master branch and - process the following `num_commits` commits - (also on the master branch). - - Method 3: from_last_processed & num_commits - Set `from_last_processed` to True and pass - `num_commits` parameter. Using this method, the - function will resume processing from succeeding commit of - `self.last_processed_commit` for `num_commits` commits. - - Method 4: from_last_processed & end_commit_sha - Set `from_last_processed` to True and pass - `end_commit_sha` parameter. The range of continued processing - will be `self.last_processed_commit.hexsha..end_commit_sha`. - - Args: - rev: A string, see above. - num_commits: An int, see above. - from_beginning: A boolean flag, see above. - from_last_processed: A boolean flag, see above. - end_commit_sha: A string, see above. 
- into_branches: A boolean flag, if True, the process function - will operate in two phases. - - In the first phase, a call commit graph is contructed - by traversing the specified range of commits on the master - branch. Merge commits are detected and recorded if the - start commit (on master) and end/merge commit of the - corresponding branch are both within the range of - traversal. Those recorded merge commits do not - get any credits (thus they are not present in - self.history data structure). - - In the second phase, it traverses all the branches detected - in the first phase and assign them due credits. - - max_branch_length: An int, the maximum number of commits - to trace back before abortion. - min_branch_date: A python time object, stop backtracing if - a commit is authored before this time. - checkpoint_interval: An int. - """ - if not from_last_processed: - self._reset_state() - self.merge_commits = deque() - - # Method 2 - if from_beginning: - if num_commits is None: - num_commits = 0 - self.commits = list( - self.repo.iter_commits(first_parent=True))[-num_commits:] - - elif from_last_processed: - if not self.last_processed_commit: - print("No history exists yet, terminated.") - return - - # Method 4 - if end_commit_sha: - rev = self.last_processed_commit.hexsha + '..' 
+ end_commit_sha - self.commits = list(self.repo.iter_commits( - rev, first_parent=True)) - # Method 3 - elif num_commits: - rev = self.last_processed_commit.hexsha + '..master' - self.commits = list(self.repo.iter_commits( - rev, first_parent=True))[-num_commits:] - else: - print("Both end_commit_sha and num_commits are None.") - return - - else: - # Method 1 - self.commits = list(self.repo.iter_commits(rev, first_parent=True)) - - if len(self.commits) > 0: - self.last_processed_commit = self.commits[0] - else: - print("The range specified is empty, terminated.") - return - - counter = 1 - start = time.time() - - # 1st phase - for commit in reversed(self.commits): - sha = commit.hexsha - self.visited.add(sha) - self._start_process_commit(commit) - - if verbose: - print('------ No.{} {} {} {} ------'.format( - counter, sha, _subject(commit.message), - time.strftime( - "%b %d %Y", time.gmtime(commit.authored_date) - )) - ) - else: - print('------ No.{} {} ------'.format(counter, sha)) - if counter % 100 == 0: - print('------ Used time: {} ------'.format( - time.time() - start)) - - if counter % checkpoint_interval == 0: - repo_name = os.path.basename(self.repo_path.rstrip('/')) - self.save(repo_name + '-1st-' + str(counter) + '.pickle') - - if into_branches: - is_merge_commit = len(commit.parents) > 1 - if is_merge_commit: - self.merge_commits.append(commit) - """ - is_merge_commit = self._detect_branch( - commit, max_branch_length, min_branch_date) - """ - else: - is_merge_commit = False - - if not skip_work: - # generate diff_index by diff commit with its first parent - diff_index = _diff_with_first_parent(commit) - - # figure out the change type of each entry in diff_index - _fill_change_type(diff_index) - - if verbose: - _print_diff_index(diff_index) - - for diff in diff_index: - if diff.change_type == 'U': - print('Unknown change type encountered.') - continue - - if diff.change_type == 'A': - self.on_add(diff, commit, is_merge_commit) - - elif diff.change_type 
== 'D': - self.on_delete(diff, commit, is_merge_commit) - - elif diff.change_type == 'R': - self.on_rename(diff, commit, is_merge_commit) - - else: - self.on_modify(diff, commit, is_merge_commit) - - counter += 1 - - # 2nd phase - if into_branches: - - commit_cnt = 1 - branch_cnt = 1 - start = time.time() - - print('\n------- 2nd phase -------\n') - - while len(self.merge_commits) > 0: - mc = self.merge_commits.popleft() - cur_commit = mc.parents[1] - branch_length = 0 - valid_branch = False - - while True: - - # stop tracing back along this branch - # if cur_commit has been visited - if cur_commit.hexsha in self.visited: - break - - # stop if we have reached time boundary - authored_date = time.gmtime(cur_commit.authored_date) - if min_branch_date and min_branch_date > authored_date: - break - - # stop if we have reached max_branch_length - if branch_length >= max_branch_length: - break - - # stop if we have reached the very first commit - if len(cur_commit.parents) == 0: - break - - # will process at least one commit for this branch - valid_branch = True - - # process this commit - if verbose: - print('------ Commit No.{} '.format(commit_cnt), - 'Branch No.{} {} {} {} ------'.format( - branch_cnt, - cur_commit.hexsha, - _subject(cur_commit.message), - time.strftime( - "%b %d %Y", - time.gmtime(cur_commit.authored_date) - ) - ) - ) - else: - print('------ Commit No.{} '.format(commit_cnt), - 'Branch No.{} {}------'.format( - branch_cnt, cur_commit.hexsha)) - - if commit_cnt % 100 == 0: - print('------ Used time: {} ------'.format( - time.time() - start)) - - if commit_cnt % checkpoint_interval == 0: - repo_name = os.path.basename( - self.repo_path.rstrip('/')) - self.save( - repo_name + '-2nd-' + str(counter) + '.pickle') - - self.visited.add(cur_commit.hexsha) - # add to queue if prev_commit is a merge commit - if len(cur_commit.parents) == 2: - self.merge_commits.append(cur_commit) - - if not skip_work: - self._start_process_commit(cur_commit) - diff_index = 
_diff_with_first_parent(cur_commit) - _fill_change_type(diff_index) - for diff in diff_index: - if diff.change_type == 'U': - print('Unknown change type encountered.') - continue - if diff.change_type == 'A': - self.on_add2(diff, cur_commit) - elif diff.change_type == 'D': - self.on_delete2(diff, cur_commit) - elif diff.change_type == 'R': - self.on_rename2(diff, cur_commit) - else: - self.on_modify2(diff, cur_commit) - - # get next commit - prev_commit = cur_commit.parents[0] - - cur_commit = prev_commit - branch_length += 1 - commit_cnt += 1 - - if valid_branch: - branch_cnt += 1 - - repo_name = os.path.basename(self.repo_path.rstrip('/')) - self.save(repo_name + '-finished.pickle') - - def _reset_state(self): - self.visited = set() - self.last_processed_commit = None - - def _start_process_commit(self, commit): - pass - - def set_repo_path(self, repo_path): - self.repo_path = repo_path - self.repo = initialize_repo(repo_path) - self.last_processed_commit = self.repo.commit(self.last_sha) - - def on_add(self, diff, commit, is_merge_commit): - return 0 - - def on_delete(self, diff, commit, is_merge_commit): - return 0 - - def on_rename(self, diff, commit, is_merge_commit): - return 0 - - def on_modify(self, diff, commit, is_merge_commit): - return 0 - - def on_add2(self, diff, commit): - return 0 - - def on_delete2(self, diff, commit): - return 0 - - def on_rename2(self, diff, commit): - return 0 - - def on_modify2(self, diff, commit): - return 0 - - def __getstate__(self): - state = { - 'visited': self.visited, - 'last_sha': self.last_processed_commit.hexsha - } - return state - - def __setstate__(self, state): - self.__dict__.update(state) - - def save(self, fname): - with open(fname, 'wb+') as f: - pickle.dump(self, f) diff --git a/persper/analytics/score.py b/persper/analytics/score.py new file mode 100644 index 00000000000..42dcb1686c6 --- /dev/null +++ b/persper/analytics/score.py @@ -0,0 +1,29 @@ +from typing import Dict, List +import numpy as np + + +def 
normalize(scores: Dict[str, float]) -> Dict[str, float]: + normalized_scores = {} + score_sum = 0 + for _, score in scores.items(): + score_sum += score + + for idx in scores: + normalized_scores[idx] = scores[idx] / score_sum + return normalized_scores + + +def commit_overall_scores(commit_devranks: Dict[str, float], + clf_results: Dict[str, List[float]], + label_weights: List[float], + top_one=False) -> Dict[str, float]: + overall_scores = {} + for sha, dr in commit_devranks.items(): + assert sha in clf_results, "Commit %s does not have label." + if top_one: + top_idx = np.argmax(clf_results[sha]) + overall_scores[sha] = label_weights[top_idx] * dr + else: + overall_scores[sha] = np.dot(clf_results[sha], label_weights) * dr + + return normalize(overall_scores) diff --git a/persper/analytics/srcml.py b/persper/analytics/srcml.py index 7b90d9090c5..f64cce5c95c 100755 --- a/persper/analytics/srcml.py +++ b/persper/analytics/srcml.py @@ -50,33 +50,38 @@ def transform_dir(input_dir, output_dir, extensions=('.c', '.h')): print("Tranformation completed, {} processed.".format(counter)) -def transform_src_to_tree(source_code, ext='.c'): - root = None +def src_to_tree(filename, src): + """ + Assume src is UTF-8 encoded. 
+ the temp file needs to have the right ext so that srcml can open it + """ + _, ext = os.path.splitext(filename) + if ext == '': + print("ERROR: src_to_tree can't extract file extension.") + return None + try: - f = tempfile.NamedTemporaryFile(mode='wb+', delete=False) - f.write(source_code.encode('utf-8', 'replace')) + f = tempfile.NamedTemporaryFile(mode='wb+', suffix=ext, delete=False) + f.write(src.encode('utf-8', 'replace')) f.close() except UnicodeEncodeError as e: - print("UnicodeEncodeError in transform_src_to_tree!") + print("ERROR: src_to_tree encounters UnicodeEncodeError.") if not f.closed: f.close() os.remove(f.name) return None - # rename so that srcml can open it - new_fname = f.name + ext - os.rename(f.name, new_fname) xml_path = f.name + ".xml" - cmd = 'srcml {} --position -o {}'.format(new_fname, xml_path) + cmd = 'srcml {} --position --filename {} -o {}'.format(f.name, '\"/' + filename + '\"', xml_path) subprocess.call(cmd, shell=True) try: root = etree.parse(xml_path).getroot() except: - print("Unable to parse xml file!") + print("ERROR: src_to_tree unable to parse xml file.") finally: if not f.closed: f.close() - os.remove(new_fname) + os.remove(f.name) if os.path.exists(xml_path): os.remove(xml_path) diff --git a/persper/graphs/analyzer.py b/persper/graphs/analyzer.py deleted file mode 100644 index 66b0ed988d5..00000000000 --- a/persper/graphs/analyzer.py +++ /dev/null @@ -1,268 +0,0 @@ -import os -import time -import pickle -from persper.graphs.devrank import devrank -from persper.graphs.git_tools import get_contents, _diff_with_first_parent -from persper.graphs.iterator import RepoIterator -from persper.util.bidict import bidict - - -def print_overview(commits, branch_commits): - print('----- Overview ------') - print('# of commits on master: %d' % len(commits)) - print('# of commits on branch: %d' % len(branch_commits)) - - -def print_commit_info(phase, idx, commit, start_time, verbose): - if verbose: - print('----- No.%d %s %s %s -----' % 
- (idx, commit.hexsha, subject_of(commit.message), - time.strftime("%b %d %Y", time.gmtime(commit.authored_date)))) - else: - print('----- No.%d %s on %s -----' % (idx, commit.hexsha, phase)) - - if idx % 100 == 0: - print('------ Used time: %.3f -----' % (time.time() - start_time)) - - -def subject_of(msg): - return msg.split('\n', 1)[0].lstrip().rstrip() - - -def _get_fnames(diff): - if diff.new_file: - # change type 'A' - old_fname = None - new_fname = diff.b_blob.path - elif diff.deleted_file: - # change type 'D' - old_fname = diff.a_blob.path - new_fname = None - elif diff.renamed: - # change type 'R' - old_fname = diff.rename_from - new_fname = diff.rename_to - elif (diff.a_blob and diff.b_blob and - (diff.a_blob != diff.b_blob)): - # change type 'M' - old_fname = new_fname = diff.b_blob.path - else: - # change type 'U' - return None, None - - return old_fname, new_fname - - -def is_merge_commit(commit): - return len(commit.parents) > 1 - - -def _normalize_shares(email_to_share): - share_sum = 0 - for email, share in email_to_share.items(): - share_sum += share - - for email in email_to_share: - email_to_share[email] /= share_sum - - -class Analyzer: - - def __init__(self, repo_path, graph_server): - self.graph_server = graph_server - self.ri = RepoIterator(repo_path) - self.history = {} - self.id_map = {} - self.ordered_shas = [] - self.graph = None - - def analyze(self, rev=None, - from_beginning=False, - num_commits=None, - continue_iter=False, - end_commit_sha=None, - into_branches=False, - max_branch_length=100, - min_branch_date=None, - checkpoint_interval=1000, - verbose=False): - - if not continue_iter: - self.reset_state() - self.graph_server.reset_graph() - - commits, branch_commits = \ - self.ri.iter(rev=rev, - from_beginning=from_beginning, - num_commits=num_commits, - continue_iter=continue_iter, - end_commit_sha=end_commit_sha, - into_branches=into_branches, - max_branch_length=max_branch_length, - min_branch_date=min_branch_date) - - 
print_overview(commits, branch_commits) - start_time = time.time() - - for idx, commit in enumerate(reversed(commits), 1): - phase = 'main' - print_commit_info(phase, idx, commit, start_time, verbose) - self.analyze_master_commit(commit) - self.autosave(phase, idx, checkpoint_interval) - - for idx, commit in enumerate(branch_commits, 1): - phase = 'branch' - print_commit_info(phase, idx, commit, start_time, verbose) - self.analyze_branch_commit(commit) - self.autosave(phase, idx, checkpoint_interval) - - self.autosave('finished', 0, 1) - - def _analyze_commit(self, commit, ccg_func): - sha = commit.hexsha - self.ordered_shas.append(sha) - self.history[sha] = {} - self.id_map[sha] = {} - diff_index = _diff_with_first_parent(commit) - - for diff in diff_index: - old_fname, new_fname = _get_fnames(diff) - # Cases we don't handle - # 1. Both file names are None - if old_fname is None and new_fname is None: - print('WARNING: unknown change type encountered.') - continue - - # 2. Either old_fname and new_fname doesn't pass filter - if ((old_fname and not self.graph_server.filter_file(old_fname)) or - (new_fname and not self.graph_server.filter_file(new_fname))): - continue - - old_src = new_src = None - - if old_fname: - old_src = get_contents( - self.ri.repo, commit.parents[0], old_fname) - - if new_fname: - new_src = get_contents(self.ri.repo, commit, new_fname) - - if old_src or new_src: - # Delegate actual work to graph_server - id_to_lines, id_map = ccg_func( - old_fname, old_src, new_fname, new_src, diff.diff) - - self.history[sha].update(id_to_lines) - self.id_map[sha].update(id_map) - - def analyze_master_commit(self, commit): - self._analyze_commit(commit, self.graph_server.update_graph) - - def analyze_branch_commit(self, commit): - self._analyze_commit(commit, self.graph_server.parse) - - def reset_state(self): - self.history = {} - self.id_map = {} - self.ordered_shas = [] - self.graph = None - - def build_history(self, - commits, - phase='build-history', - 
checkpoint_interval=1000, - verbose=False): - """A helper function to access `analyze_branch_commit`""" - print_overview([], commits) - start_time = time.time() - - for idx, commit in enumerate(commits, 1): - print_commit_info(phase, idx, commit, start_time, verbose) - self.analyze_branch_commit(commit) - self.autosave(phase, idx, checkpoint_interval) - - self.autosave(phase, 0, 1) - - def aggregate_id_map(self): - final_map = bidict() - for sha in self.ordered_shas: - for old_fid, new_fid in self.id_map[sha].items(): - if old_fid in final_map.inverse: - # Make a copy so as not to remove list elements during iteration - existing_fids = final_map.inverse[old_fid].copy() - for ex_fid in existing_fids: - final_map[ex_fid] = new_fid - final_map[old_fid] = new_fid - return dict(final_map) - - def cache_graph(self): - self.graph = self.graph_server.get_graph() - - def compute_function_share(self, alpha): - self.cache_graph() - return devrank(self.graph, alpha=alpha) - - def compute_commit_share(self, alpha): - commit_share = {} - func_share = self.compute_function_share(alpha) - final_map = self.aggregate_id_map() - - # Compute final history using final_map - final_history = {} - for sha in self.history: - final_history[sha] = {} - for fid, num_lines in self.history[sha].items(): - if fid in final_map: - final_history[sha][final_map[fid]] = num_lines - else: - final_history[sha][fid] = num_lines - - # add edits by each commit up to compute total edits - total_edits = {} - for sha in final_history: - for fid, num_lines in final_history[sha].items(): - if fid in total_edits: - total_edits[fid] += num_lines - else: - total_edits[fid] = num_lines - - # Propagate to commit level - for sha in final_history: - commit_share[sha] = 0 - for fid in final_history[sha]: - if fid in func_share: - commit_share[sha] += \ - (final_history[sha][fid] / total_edits[fid] * - func_share[fid]) - - return commit_share - - def compute_developer_share(self, alpha): - dev_share = {} - commit_share 
= self.compute_commit_share(alpha) - - for sha in commit_share: - email = self.ri.repo.commit(sha).author.email - if email in dev_share: - dev_share[email] += commit_share[sha] - else: - dev_share[email] = commit_share[sha] - return dev_share - - def locrank_commits(self): - loc = {} - for sha in self.history: - loc[sha] = 0 - for func in self.history[sha]: - loc[sha] += self.history[sha][func] - return sorted(loc.items(), key=lambda x: x[1], reverse=True) - - def save(self, fname): - with open(fname, 'wb+') as f: - pickle.dump(self, f) - - def autosave(self, phase, idx, checkpoint_interval): - if idx % checkpoint_interval == 0: - repo_name = os.path.basename(self.ri.repo_path.rstrip('/')) - fname = repo_name + '-' + phase + '-' + str(idx) + '.pickle' - self.save(fname) diff --git a/persper/graphs/c.py b/persper/graphs/c.py deleted file mode 100644 index 787fc29b732..00000000000 --- a/persper/graphs/c.py +++ /dev/null @@ -1,108 +0,0 @@ -import re -import networkx as nx -from persper.graphs.patch_parser import PatchParser -from persper.graphs.srcml import transform_src_to_tree -from persper.graphs.call_graph.c import update_call_graph_c, get_func_ranges_c -from persper.graphs.detect_change import get_changed_functions -from persper.graphs.inverse_diff import inverse_diff -from persper.graphs.graph_server import GraphServer - - -class CGraphServer(GraphServer): - def __init__(self, filename_regex_strs): - self._graph = nx.DiGraph() - self._filename_regexes = [re.compile(regex_str) for regex_str in filename_regex_strs] - self._pparser = PatchParser() - - def update_graph(self, old_filename, old_src, new_filename, new_src, patch): - # on add, rename, modify: update_roots = [new_root] - # on delete: update_roots = [] - update_root = [] - - # on add: modified_func = {} - # on rename, modify, delete: modified_func is computed by - # parsing patch and call get_changed_functions - modified_func = {} - - if old_src is not None: - old_root = transform_src_to_tree(old_src) - 
if old_root is None: - return {}, {} - - modified_func = get_changed_functions( - *get_func_ranges_c(old_root), - *self._parse_patch(patch)) - - if new_src is not None: - new_root = transform_src_to_tree(new_src) - if new_root is None: - return {}, {} - update_root = [new_root] - - # update call graph - # if on delete, then new_func is expected to be an empty dict - new_func = update_call_graph_c(self.graph, update_root, modified_func) - - # return history - return {**new_func, **modified_func}, {} - - def parse(self, old_filename, old_src, new_filename, new_src, patch): - """Return None if there is an error""" - forward_stats = {} - bckward_stats = {} - - adds, dels = self._parse_patch(patch) - if adds is None or dels is None: - return None, {} - - if old_src is not None: - old_root = transform_src_to_tree(old_src) - if old_root is None: - return None, {} - - forward_stats = get_changed_functions( - *get_func_ranges_c(old_root), adds, dels) - - if new_src is not None: - inv_adds, inv_dels = inverse_diff(adds, dels) - new_root = transform_src_to_tree(new_src) - if new_root is None: - return None, {} - - bckward_stats = get_changed_functions( - *get_func_ranges_c(new_root), inv_adds, inv_dels) - - """ - forward_stats and bckward_stats might have different values - for the same function, as an example, please refer to - `str_equals` function in the following link. In this case, - we'll stick with forward_stats (override bckward_stats). 
- https://github.com/UltimateBeaver/test_feature_branch/commit/364d5cc49aeb2e354da458924ce84c0ab731ac77 - """ - bckward_stats.update(forward_stats) - return bckward_stats, {} - - def get_graph(self): - return self.graph - - def reset_graph(self): - self.graph = nx.DiGraph() - - def filter_file(self, filename): - for regex in self._filename_regexes: - if not regex.match(filename): - return False - return True - - def config(self, param): - pass - - def _parse_patch(self, patch): - adds, dels = None, None - try: - adds, dels = self._pparser.parse(patch.decode('utf-8', 'replace')) - except UnicodeDecodeError: - print("UnicodeDecodeError when parsing patch!") - except: - print("Unknown error when parsing patch!") - return adds, dels diff --git a/persper/graphs/call_commit_graph.py b/persper/graphs/call_commit_graph.py deleted file mode 100644 index 691385d9787..00000000000 --- a/persper/graphs/call_commit_graph.py +++ /dev/null @@ -1,368 +0,0 @@ -import sys -import networkx as nx -from persper.graphs.processor import Processor, _diff_with_first_parent, _fill_change_type -from persper.graphs.patch_parser import PatchParser -from persper.graphs.srcml import transform_src_to_tree -from persper.graphs.detect_change import get_changed_functions -from persper.graphs.call_graph.c import update_call_graph_c, get_func_ranges_c -from persper.graphs.call_graph.java import update_call_graph_java, get_func_ranges_java -from persper.graphs.call_graph.java import prepare_env -from persper.graphs.devrank import devrank -from persper.graphs.git_tools import get_contents - - -def _inverse_diff_result(adds, dels): - """ - >>> adds = [[11, 1], [32, 1]] - >>> dels = [[11, 11], [31, 32]] - >>> _inverse_diff_result(adds, dels) - ([[10, 1], [30, 2]], [[11, 11], [31, 31]]) - """ - diff = 0 - add_ptr, del_ptr = 0, 0 - num_adds, num_dels = len(adds), len(dels) - inv_adds, inv_dels = [], [] - - def _handle_a(a): - nonlocal diff - inv_dels.append([diff + a[0] + 1, diff + a[0] + a[1]]) - diff += 
a[1] - - def _handle_d(d): - nonlocal diff - inv_adds.append([diff + d[0] - 1, d[1] - d[0] + 1]) - diff -= (d[1] - d[0] + 1) - - while add_ptr < num_adds or del_ptr < num_dels: - if add_ptr < num_adds and del_ptr < num_dels: - if adds[add_ptr][0] < dels[del_ptr][0]: - _handle_a(adds[add_ptr]) - add_ptr += 1 - else: - _handle_d(dels[del_ptr]) - del_ptr += 1 - elif add_ptr < num_adds and del_ptr >= num_dels: - # we have finished dels - _handle_a(adds[add_ptr]) - add_ptr += 1 - else: - # we have finished adds - _handle_d(dels[del_ptr]) - del_ptr += 1 - - return inv_adds, inv_dels - - -def _normalize_shares(email_to_share): - share_sum = 0 - for email, share in email_to_share.items(): - share_sum += share - - for email in email_to_share: - email_to_share[email] /= share_sum - - -class CallCommitGraph(Processor): - - def __init__(self, repo_path, lang='c'): - super().__init__(repo_path) - self.G = None - self.lang = lang - if lang == 'c': - self.exts = ('.c', '.h') - elif lang == 'java': - self.exts = ('.java',) - else: - print("Invalid language option, terminated.") - sys.exit(-1) - self.env = {} - self.history = {} - self.share = {} - self.patch_parser = PatchParser() - - def _reset_state(self): - super()._reset_state() - self.G = nx.DiGraph() - self.history = {} - - def _start_process_commit(self, commit): - self.history[commit.hexsha] = {} - if self.lang == 'java': - new_roots = [] - diff_index = _diff_with_first_parent(commit) - _fill_change_type(diff_index) - for diff in diff_index: - if diff.change_type in ['A', 'M']: - fname = diff.b_blob.path - elif diff.change_type == 'R': - fname = diff.rename_to - else: - continue - - if self.fname_filter(fname): - root = self._get_xml_root(commit, fname) - prepare_env(root, env=self.env) - - def on_add(self, diff, commit, is_merge_commit): - old_fname = None - new_fname = diff.b_blob.path - return self._first_phase(diff, commit, - is_merge_commit, - old_fname=old_fname, - new_fname=new_fname) - - def on_delete(self, diff, 
commit, is_merge_commit): - old_fname = diff.a_blob.path - new_fname = None - return self._first_phase(diff, commit, - is_merge_commit, - old_fname=old_fname, - new_fname=new_fname) - - def on_rename(self, diff, commit, is_merge_commit): - new_fname = diff.rename_to - old_fname = diff.rename_from - return self._first_phase(diff, commit, - is_merge_commit, - old_fname=old_fname, - new_fname=new_fname) - - def on_modify(self, diff, commit, is_merge_commit): - fname = diff.b_blob.path - return self._first_phase(diff, commit, - is_merge_commit, - old_fname=fname, - new_fname=fname) - - def on_add2(self, diff, commit): - old_fname = None - new_fname = diff.b_blob.path - return self._second_phase(diff, commit, - old_fname=old_fname, - new_fname=new_fname) - - def on_delete2(self, diff, commit): - old_fname = diff.a_blob.path - new_fname = None - return self._second_phase(diff, commit, - old_fname=old_fname, - new_fname=new_fname) - - def on_rename2(self, diff, commit): - new_fname = diff.rename_to - old_fname = diff.rename_from - return self._second_phase(diff, commit, - old_fname=old_fname, - new_fname=new_fname) - - def on_modify2(self, diff, commit): - fname = diff.b_blob.path - return self._second_phase(diff, commit, - old_fname=fname, - new_fname=fname) - - def fname_filter(self, fname): - for ext in self.exts: - if fname.endswith(ext): - return True - return False - - def _get_xml_root(self, commit, fname): - if self.lang == 'c': - return transform_src_to_tree( - get_contents(self.repo, commit, fname)) - elif self.lang == 'java': - return transform_src_to_tree( - get_contents(self.repo, commit, fname), ext='.java') - - def _first_phase(self, diff, commit, is_merge_commit, - old_fname=None, new_fname=None): - - if ((old_fname and self.fname_filter(old_fname)) or - (new_fname and self.fname_filter(new_fname))): - - # on add, rename, modify: update_roots = [new_root] - # on delete: update_roots = [] - update_roots = [] - - # on add: modified_func = {} - # on rename, 
modify, delete: modified_func is computed by - # parsing patch and call get_changed_functions - modified_func = {} - - # do not need to parse patch if on add - if old_fname is not None: - additions, deletions = self.parse_patch(diff.diff) - if additions is None or deletions is None: - return -1 - - old_root = self._get_xml_root(commit.parents[0], old_fname) - if old_root is None: - return -1 - - if self.lang == 'c': - modified_func = get_changed_functions( - *get_func_ranges_c(old_root), additions, deletions) - elif self.lang == 'java': - modified_func = get_changed_functions( - *get_func_ranges_java(old_root), additions, deletions) - - # parse new src to tree - if new_fname is not None: - new_root = self._get_xml_root(commit, new_fname) - if new_root is None: - return -1 - update_roots.append(new_root) - - # update call graph - # if on delete, then new_func is expected to be an empty dict - if self.lang == 'c': - new_func = update_call_graph_c( - self.G, update_roots, modified_func) - elif self.lang == 'java': - new_func = update_call_graph_java( - self.G, update_roots, modified_func, env=self.env) - - # only update self.history for non-merge commit - if not is_merge_commit: - for func_name in new_func: - self.history[commit.hexsha][func_name] = \ - new_func[func_name] - - for func_name in modified_func: - self.history[commit.hexsha][func_name] = \ - modified_func[func_name] - - return 0 - - def _second_phase(self, diff, commit, old_fname=None, new_fname=None): - - if ((old_fname and self.fname_filter(old_fname)) or - (new_fname and self.fname_filter(new_fname))): - - adds, dels = self.parse_patch(diff.diff) - if adds is None or dels is None: - return -1 - modified_func, inv_modified_func = {}, {} - - if old_fname is not None: - old_root = self._get_xml_root(commit.parents[0], old_fname) - if old_root is None: - return -1 - - if self.lang == 'c': - modified_func = get_changed_functions( - *get_func_ranges_c(old_root), adds, dels) - elif self.lang == 'java': - 
modified_func = get_changed_functions( - *get_func_ranges_java(old_root), adds, dels) - - if new_fname is not None: - inv_adds, inv_dels = _inverse_diff_result(adds, dels) - new_root = self._get_xml_root(commit, new_fname) - if new_root is None: - return -1 - - if self.lang == 'c': - inv_modified_func = get_changed_functions( - *get_func_ranges_c(new_root), inv_adds, inv_dels) - elif self.lang == 'java': - inv_modified_func = get_changed_functions( - *get_func_ranges_java(new_root), inv_adds, inv_dels) - - for func_name in modified_func: - if func_name in self.G: - self.history[commit.hexsha][func_name] = \ - modified_func[func_name] - - for func_name in inv_modified_func: - if func_name in self.G and func_name not in modified_func: - self.history[commit.hexsha][func_name] = \ - inv_modified_func[func_name] - - def parse_patch(self, patch): - additions, deletions = None, None - try: - additions, deletions = self.patch_parser.parse( - patch.decode('utf-8', 'replace')) - except UnicodeDecodeError: - print("UnicodeDecodeError in function parse_patch!") - except: - print("Unknown error in function parse_patch!") - return additions, deletions - - def update_shares(self, alpha): - self.scores = devrank(self.G, alpha=alpha) - for sha in self.history: - self.share[sha] = 0 - for func_name in self.history[sha]: - if func_name in self.G: - # this condition handles the case where - # func_name is deleted by sha, - # but has never been added or modified before - self.share[sha] += \ - (self.history[sha][func_name] / - self.G.node[func_name]['num_lines']) \ - * self.scores[func_name] - - def devrank_commits(self, alpha): - self.update_shares(alpha) - return sorted(self.share.items(), key=lambda x: x[1], reverse=True) - - def devrank_functions(self, alpha): - self.scores = devrank(self.G, alpha=alpha) - return sorted(self.scores.items(), key=lambda x: x[1], reverse=True) - - def devrank_developers(self, alpha, sha_to_type={}, coefs=[1, 1, 1, 1]): - self.update_shares(alpha) - 
email_to_share = {} - email_to_name = {} - - hexsha_to_type = {} - for sha, t in sha_to_type.items(): - c = self.repo.commit(sha) - hexsha_to_type[c.hexsha] = t - - for sha in self.history: - if sha in hexsha_to_type: - coef = coefs[int(hexsha_to_type[sha])] - else: - coef = 1 - actor = self.repo.commit(sha).author - email = actor.email - email_to_name[email] = actor.name - if email in email_to_share: - email_to_share[email] += coef * self.share[sha] - else: - email_to_share[email] = coef * self.share[sha] - _normalize_shares(email_to_share) - sorted_shares = sorted(email_to_share.items(), - key=lambda x: x[1], - reverse=True) - return sorted_shares, email_to_name - - def locrank_commits(self): - self.loc = {} - for sha in self.history: - self.loc[sha] = 0 - for func_name in self.history[sha]: - self.loc[sha] += self.history[sha][func_name] - return sorted(self.loc.items(), key=lambda x: x[1], reverse=True) - - def __getstate__(self): - state = super().__getstate__() - state['G'] = self.G - state['history'] = self.history - state['lang'] = self.lang - state['exts'] = self.exts - state['env'] = self.env - return state - - def __setstate__(self, state): - super().__setstate__(state) - self.share = {} - self.patch_parser = PatchParser() - -if __name__ == "__main__": - import doctest - doctest.testmod() diff --git a/persper/graphs/call_graph/c.py b/persper/graphs/call_graph/c.py deleted file mode 100644 index 50ef6604d76..00000000000 --- a/persper/graphs/call_graph/c.py +++ /dev/null @@ -1,150 +0,0 @@ -import networkx as nx -from persper.graphs.call_graph.utils import remove_edges_of_node, ns, line_attr - - -class NotFunctionCallError(Exception): - """Raise for false positive nodes""" - - -def handle_function(func_node): - """Given a node, - return function name and function range (start & end lineno)""" - - name_node = func_node.find('srcml:name', ns) - func_name, start_line = handle_name(name_node) - if not func_name or not start_line: - print('Function name/start 
not found!') # very unlikely to happen - return None, None, None - - block_node = func_node.find('srcml:block', ns) - if block_node is None: - try: - block_node = func_node.xpath('./following-sibling::srcml:block', - namespaces=ns)[0] - except: - print("Block node not found (in func {})".format(func_name)) - return func_name, None, None - try: - pos_node = block_node.find('pos:position', ns) - end_line = int(pos_node.attrib[line_attr]) - except: - print("Block node doesn't have position node inside!") - return func_name, None, None - - return func_name, start_line, end_line - - -def handle_name(name_node): - """Given an node, - return its text content and position (line)""" - text, line = None, None - if name_node is not None: - text = name_node.text - line = int(name_node.attrib[line_attr]) - return text, line - - -def handle_call(call_node): - """Given an node, return function name being called - - Throws NotFunctionCallException - - Case 1: casting function pointer is not function call - Example: tmp.sa_handler = (void (*)(int)) handler; - - Case 2: function call from struct variable - Example: tty->write(tty) - - """ - name_node = call_node.find('srcml:name', ns) - if name_node is None: - # Case 1 - raise NotFunctionCallError() - callee_name = name_node.text - if callee_name is None: - # Case 2 - callee_name = name_node[-1].text - return callee_name - - -def build_call_graph_c(roots, G=None): - if G is None: - G = nx.DiGraph() - - new_func = {} - func_to_file = {} - for root in roots: - # print('------ ' + root.attrib['filename'] + ' ------') - - for func_node in root.findall('./srcml:function', namespaces=ns): - - caller_name, start_line, end_line = handle_function(func_node) - if not caller_name: - continue - - if start_line and end_line: - num_lines = end_line - start_line + 1 - else: - # default num_lines is 1 - num_lines = 1 - - if caller_name not in G: - # Case 1: hasn't been defined and hasn't been called - new_func[caller_name] = num_lines - 
G.add_node(caller_name, num_lines=num_lines, defined=True) - elif not G.node[caller_name]['defined']: - # Case 2: has been called but hasn't been defined - new_func[caller_name] = num_lines - G.node[caller_name]['defined'] = True - G.node[caller_name]['num_lines'] = num_lines - else: - # Case 3: has been called and has been defined - # it is modified in the latest commit - # pass because it's not a new function - # so no need to add it to new_func and to - # update G.node[caller_name]['num_lines'] - pass - - func_to_file[caller_name] = root.attrib['filename'] - - # handle all function calls - for call_node in func_node.xpath('.//srcml:call', namespaces=ns): - - try: - callee_name = handle_call(call_node) - except NotFunctionCallError: - continue - except: - print("Callee name not found! (in func %s)" % caller_name) - continue - - if callee_name not in G: - G.add_node(callee_name, num_lines=1, defined=False) - G.add_edge(caller_name, callee_name) - - return G, new_func, func_to_file - - -def update_call_graph_c(G, roots, modified_func): - for func_name in modified_func: - if func_name in G: - remove_edges_of_node(G, func_name, in_edges=False) - G.node[func_name]['num_lines'] += modified_func[func_name] - - # here roots should be constructed from the more recent commit - # new functions and their sizes are stored in new_func dictionary - _, new_func, _ = build_call_graph_c(roots, G) - return new_func - - -def get_func_ranges_c(root): - func_names, func_ranges = [], [] - for func_node in root.findall('./srcml:function', namespaces=ns): - - func_name, start_line, end_line = handle_function(func_node) - if not (func_name and start_line and end_line): - continue - - func_ranges.append([start_line, end_line]) - func_names.append(func_name) - return func_names, func_ranges diff --git a/persper/graphs/call_graph/cpp.py b/persper/graphs/call_graph/cpp.py deleted file mode 100644 index 716146003bc..00000000000 --- a/persper/graphs/call_graph/cpp.py +++ /dev/null @@ -1,51 +0,0 
@@ - -ns = {'srcml': 'http://www.srcML.org/srcML/src', 'pos': 'http://www.srcML.org/srcML/position'} - -def get_func_ranges_cpp(root): - func_ranges, func_names = [], [] - for func_node in root.xpath('./srcml:constructor | ./srcml:function', namespaces=ns): - - func_name, start_line, end_line = handle_function(func_node) - if not (func_name and start_line and end_line): - continue - - func_ranges.append([start_line, end_line]) - func_names.append(func_name) - return func_names, func_ranges - -def handle_name(name_node): - func_id, line = None, None - if name_node != None: - if name_node.text: - func_id = name_node.text - line = int(name_node.attrib['{http://www.srcML.org/srcML/position}line']) - else: - try: - # alternative solution is to use - # graphs.call_graph.utils.transform_node_to_src - class_name = name_node[0].text - line = int(name_node[0].attrib['{http://www.srcML.org/srcML/position}line']) - assert(name_node[1].text == "::") - func_name = name_node[2].text - func_id = "{}::{}".format(class_name, func_name) - except: - import pdb - pdb.set_trace() - return func_id, line - -def handle_function(func_node): - - name_node = func_node.find('srcml:name', ns) - func_id, start_line = handle_name(name_node) - if not func_id or not start_line: - print('Function name/start not found!') - return None, None, None - - block_node = func_node.find('srcml:block', ns) - try: - pos_node = block_node.find('pos:position', ns) - end_line = int(pos_node.attrib['{http://www.srcML.org/srcML/position}line']) - except: - return func_id, None, None - - return func_id, start_line, end_line diff --git a/persper/graphs/call_graph/java.py b/persper/graphs/call_graph/java.py deleted file mode 100644 index 9b06f49d53b..00000000000 --- a/persper/graphs/call_graph/java.py +++ /dev/null @@ -1,411 +0,0 @@ -import networkx as nx -from lxml import etree -from persper.graphs.call_graph.utils import transform_node_to_src, remove_edges_of_node -from persper.graphs.call_graph.utils import ns, 
line_attr - - -def generate_fid(class_name, func_name): - return class_name + ':' + func_name - - -def decompose_fid(func_id): - return func_id.split(':') - - -def get_specifiers(node): - """Helper function to first find all specifier nodes - and then return their texts""" - return [n.text for n in node.findall('./srcml:specifier', ns)] - - -def handle_name_node(name_node): - return transform_node_to_src(name_node).strip() - - """ - child_nodes = name_node.getchildren() - text_of_itself = name_node.text or '' - if len(child_nodes) == 0: - return text_of_itself - else: - child_names = [] - for child_node in child_nodes: - child_names.append(handle_name_node(child_node)) - return text_of_itself + ''.join(child_names) - """ - - -def get_name(node): - """Helper function to first find name node and then parse name""" - return handle_name_node(node.find('srcml:name', ns)) - - -def get_type(node): - """First get type node, then get type node's name node, - finally returns node's type""" - type_node = node.find('srcml:type', ns) - return handle_name_node(type_node.find('srcml:name', ns)) - - -def handle_decl_node(decl_node): - type_node = decl_node.find('srcml:type', ns) - type_name_node = type_node.find('srcml:name', ns) - name_node = decl_node.find('srcml:name', ns) - - try: - type_name = handle_name_node(type_name_node) - var_name = handle_name_node(name_node) - except: - import pdb - pdb.set_trace() - - return type_name, var_name - - -def handle_decl_stmt_node(decl_stmt_node, local_env): - """ - Node Structure: - A node consists of one or more nodes, - each has a node and a node. 
- The node may or may not has a node, the following - declaration statement is an example: - - int c, char2, char3; - """ - prev_type = None - decl_nodes = decl_stmt_node.findall('./srcml:decl', ns) - for decl_node in decl_nodes: - type_node = decl_node.find('./srcml:type', ns) - type_name_node = type_node.find('./srcml:name', ns) - if type_name_node is None: - type_name = prev_type - else: - type_name = handle_name_node(type_name_node) - var_name = get_name(decl_node) - local_env[var_name] = type_name - prev_type = type_name - - -def handle_call_node(call_node, cl_name, local_env, env): - """Parse a call node and return the identifer of the function being called - Type of calls we handle: - Case 1: doSomething(args) - doSomething is a public/private static/instance - member method of cl_name - Case 2: A a = new A() - A is a class (env), A's constructor function is called in this case - Case 3: a.doSomething(args) - a is an object, could be newly instantiated in this - function (local_env), - or could be passed as a parameter (local_env), - or could be this class's public/private member variable (env) - doSomething could be either a static method or a instance method - Case 4: A.doSomething(args) - A is a class (env) - doSomething is one of A's static methods - Case 5: A.var.doSomething(args) - A is a class (env) - var is a public static member of class A (env) - Case 6: a.var.doSomething(args) - a is an object, could be newly instantiated in this - function (local_env), - or could be passed as a parameter (local_env), - or could be this class' public/private member variable (env) - var is a public (static) member of object a (local_env & env) - - Returns: - A String representing the signature of the function being called - """ - call_name = get_name(call_node) - - names_lst = [n.strip() for n in call_name.split('.')] - callee_func_name = names_lst[-1] - if len(names_lst) == 1: - previous_node = call_node.getprevious() - if previous_node is not None and 
previous_node.text == 'new': - # Case 1: calling constructor - callee_cl_name = callee_func_name - else: - # Case 2: calling member method - callee_cl_name = cl_name - return generate_fid(callee_cl_name, callee_func_name) - elif len(names_lst) == 2: - niq = names_lst[0] # niq => name in question - # check local_env first - if niq in local_env: - # Case 3 (local_env) - var_name = niq - callee_cl_name = local_env[var_name] - return generate_fid(callee_cl_name, callee_func_name) - elif niq in env[cl_name]['var']: - # case 3 (env) - var_name = niq - callee_cl_name = env[cl_name]['var'][var_name]['type'] - return generate_fid(callee_cl_name, callee_func_name) - elif niq in env: - # Case 4 - return generate_fid(niq, callee_func_name) - else: - # something went wrong, niq is probably a class not in env - # print("WARNING: niq not found in both env and local_env") - return generate_fid(niq, callee_func_name) - else: - # Case 5 or 6 - callee_cl_name = None - if names_lst[0] in local_env: - callee_cl_name = local_env[names_lst[0]] - for n in names_lst[1:-1]: - callee_cl_name = env[callee_cl_name]['var'][n]['type'] - return generate_fid(callee_cl_name, callee_func_name) - elif names_lst[0] in env[cl_name]['var']: - callee_cl_name = env[cl_name]['var'][names_lst[0]]['type'] - for n in names_lst[1:-1]: - callee_cl_name = env[callee_cl_name]['var'][n]['type'] - return generate_fid(callee_cl_name, callee_func_name) - elif names_lst[0] in env: - callee_cl_name = names_lst[0] - for n in names_lst[1:-1]: - callee_cl_name = env[callee_cl_name]['var'][n]['type'] - return generate_fid(callee_cl_name, callee_func_name) - else: - # something went wrong, names_lst[0] is probably a class not in env - # print("WARNING: names_lst[0] not found in both env and local_env") - approx_callee_cl_name = '.'.join(names_lst[:-1]) - return generate_fid(approx_callee_cl_name, callee_func_name) - - -def handle_param_lst_node(param_lst_node): - local_env = {} - param_nodes = 
param_lst_node.findall('./srcml:parameter', ns) - for param_node in param_nodes: - decl_node = param_node.find('./srcml:decl', ns) - type_name, var_name = handle_decl_node(decl_node) - local_env[var_name] = type_name - return local_env - - -def handle_func_node(func_node, class_name, G, new_func, fid_to_file, env): - """ - Args: - class_name: A String, name of the class this function belongs to - G: A nx.DiGraph object, storing the actual call graph - new_func: A dictionary, mapping a new function's identifer (fid) - to its size - fid_to_file: A dictionary, mapping fid to the file it belongs to - env: A dictionary, storing global environment - - Workflow Summary: - 1. Parse function name and generate fid - 2. Add caller function to call graph G - 3. Initialize local_env by parsing parameter list - 4. Iterate through subnodes of this function in document order - a. For node, parse it and get callee_fid, - add this new edge to call graph G - b. For node, parse it and update local_env - - Node Structure: - node's direct children include , , - , - - TODOs: - 1. Function Overload - a. Primitive type - 2. Polymorphism - 3. Collection - 4. Array - 5. Add logic to remove variable from local_env - 6. Nested class - 7. 
Anonymous class - """ - name_node = func_node.find('./srcml:name', ns) - block_node = func_node.find('./srcml:block', ns) - block_pos_node = block_node.find('./pos:position', ns) - if block_pos_node is None: - # probably a srcML parsing error - return - param_lst_node = func_node.find('./srcml:parameter_list', ns) - - func_name = get_name(func_node) - caller_fid = generate_fid(class_name, func_name) - start_line = int(name_node.attrib[line_attr]) - end_line = int(block_pos_node.attrib[line_attr]) - num_lines = end_line - start_line + 1 - - # local_env maps variable name to class name - try: - local_env = handle_param_lst_node(param_lst_node) - except: - print("Failed to parse parameter list for %s" % caller_fid) - return - - if caller_fid not in G: - # Case 1: hasn't been defined and hasn't been called - new_func[caller_fid] = num_lines - G.add_node(caller_fid, {'num_lines': num_lines, 'defined': True}) - elif not G.node[caller_fid]['defined']: - # Case 2: has been called but hasn't been defined - new_func[caller_fid] = num_lines - G.node[caller_fid]['defined'] = True - G.node[caller_fid]['num_lines'] = num_lines - else: - # Case 3: has been called and has been defined - # it is modified in the latest commit - # no need to add it to new_func or - # update G.node[caller_fid]['num_lines'] - pass - - fid_to_file[caller_fid] = env[class_name]['filename'] - - for node in block_node.iter('{*}call', '{*}decl_stmt'): - if 'call' in node.tag: - try: - callee_fid = handle_call_node(node, class_name, local_env, env) - except: - print("Excpetion in handle_call_node.") - continue - if callee_fid not in G: - G.add_node(callee_fid, {'num_lines': 1, 'defined': False}) - G.add_edge(caller_fid, callee_fid) - else: - handle_decl_stmt_node(node, local_env) - - -def handle_class_node(class_node, G, new_func, fid_to_file, env): - class_name = get_name(class_node) - - block_node = class_node.find('./srcml:block', ns) - func_nodes = block_node.findall('./srcml:function', ns) - for 
func_node in func_nodes: - handle_func_node(func_node, class_name, G, new_func, fid_to_file, env) - - -def prepare_env_class(class_node, env): - """ - Official Access Level Tutorial: - https://docs.oracle.com/javase/tutorial/java/javaOO/accesscontrol.html - - Node Structure: - node can have node or node - as its direct child - - Assumptions and TODOs: - 1. We assume every class method has modifiers (package private is rare) - 2. We currently don't distinguish between `protected` and `public` - 3. We don't keep record of methods' return types and arguments' types - """ - class_name = get_name(class_node) - filename = class_node.getparent().attrib['filename'] - # Class members are made of 2 things: - # 1. class's variable - # 2. class's methods - cl_env = {'var': {}, 'method': {}, 'filename': filename} - env[class_name] = cl_env - - # `this` and `super` - cl_env['var']['this'] = {'is_public': False, - 'is_static': False, - 'type': class_name} - super_node = class_node.find('./srcml:super', ns) - if super_node is not None: - extends_node = super_node.find('./srcml:extends', ns) - if extends_node is not None: - super_cl_name = get_name(extends_node) - cl_env['var']['super'] = {'is_public': False, - 'is_static': True, - 'type': super_cl_name} - - block_node = class_node.find('./srcml:block', ns) - - # member variables - decl_stmt_nodes = block_node.findall('./srcml:decl_stmt', ns) - for decl_stmt_node in decl_stmt_nodes: - decl_node = decl_stmt_node.find('./srcml:decl', ns) - var_name = get_name(decl_node) - var_type = get_type(decl_node) - specifiers = get_specifiers(decl_node) - is_public = 'protected' in specifiers or 'public' in specifiers - is_static = 'static' in specifiers - cl_env['var'][var_name] = {'is_public': is_public, - 'is_static': is_static, - 'type': var_type} - - # member methods - func_nodes = block_node.findall('./srcml:function', ns) - for func_node in func_nodes: - func_name = get_name(func_node) - specifiers = get_specifiers(func_node) - is_public 
= 'protected' in specifiers or 'public' in specifiers - is_static = 'static' in specifiers - cl_env['method'][func_name] = {'is_public': is_public, - 'is_static': is_static} - - -def prepare_env(root, env=None): - """ - env: class_name => [var/method] => [var_name/method_name] - """ - if env is None: - env = {} - - class_nodes = root.findall('./srcml:class', ns) - for class_node in class_nodes: - prepare_env_class(class_node, env) - return env - - -def build_call_graph_java(roots, G=None, env=None): - if G is None: - G = nx.DiGraph() - - new_func = {} - fid_to_file = {} - - # Initialize global environment - for root in roots: - env = prepare_env(root, env=env) - - # Build call graph - for root in roots: - class_nodes = root.xpath('./srcml:class', namespaces=ns) - for class_node in class_nodes: - handle_class_node(class_node, G, new_func, fid_to_file, env) - return G, new_func, fid_to_file, env - - -def update_call_graph_java(G, roots, modified_func, env=None): - for fid in modified_func: - if fid in G: - remove_edges_of_node(G, fid, in_edges=False) - G.node[fid]['num_lines'] += modified_func[fid] - - # here roots should be constructed from the more recent commit - # new functions and their sizes are stored in new_func dictionary - _, new_func, _, _ = build_call_graph_java(roots, G, env=env) - return new_func - - -def get_func_ranges_java(root): - fids, func_ranges = [], [] - for class_node in root.xpath('.//srcml:class', namespaces=ns): - try: - class_name = get_name(class_node) - except: - print("Class doesn't have name.") - continue - - block_node = class_node.find('./srcml:block', ns) - for func_node in block_node.findall('./srcml:function', ns): - try: - func_name = get_name(func_node) - fid = generate_fid(class_name, func_name) - - name_node = func_node.find('./srcml:name', ns) - block_node = func_node.find('./srcml:block', ns) - block_pos_node = block_node.find('./pos:position', ns) - start_line = int(name_node.attrib[line_attr]) - end_line = 
int(block_pos_node.attrib[line_attr]) - except: - continue - - fids.append(fid) - func_ranges.append([start_line, end_line]) - return fids, func_ranges diff --git a/persper/graphs/call_graph/utils.py b/persper/graphs/call_graph/utils.py deleted file mode 100644 index 32d951967af..00000000000 --- a/persper/graphs/call_graph/utils.py +++ /dev/null @@ -1,45 +0,0 @@ -ns = {'srcml': 'http://www.srcML.org/srcML/src', - 'pos': 'http://www.srcML.org/srcML/position'} - -line_attr = '{http://www.srcML.org/srcML/position}line' - - -def transform_node_to_src(node, s=None): - """Print out the source code of a xml node""" - if s is None: - s = "" - if node.text: - s += node.text - for child in node: - s = transform_node_to_src(child, s) - if node.tail: - s += node.tail - return s - - -def remove_edges_of_node(G, n, in_edges=True, out_edges=True): - """Remove edges of n, but keep the node itself in the graph - - >>> G3 = nx.DiGraph() - >>> G3.add_path([0, 1, 2, 3, 4]) - >>> remove_edges_of_node(G3, 2) - >>> G3.nodes() - [0, 1, 2, 3, 4] - >>> G3.edges() - [(0, 1), (3, 4)] - - """ - try: - nbrs = G._succ[n] - except KeyError: # NetworkXError if not in self - # raise NetworkXError("The node %s is not in the digraph."%(n, )) - print("The node %s is not in the digraph." 
% n) - return - if out_edges: - for u in nbrs: - del G._pred[u][n] - G._succ[n] = {} - if in_edges: - for u in G._pred[n]: - del G._succ[u][n] - G._pred[n] = {} diff --git a/persper/graphs/commit_graph.py b/persper/graphs/commit_graph.py deleted file mode 100644 index db4d4016e75..00000000000 --- a/persper/graphs/commit_graph.py +++ /dev/null @@ -1,147 +0,0 @@ -from enum import Enum -import argparse -import networkx as nx -import sys -import os -import subprocess -from persper.graphs.parse_patch import parse_patch -from persper.graphs.cpp_tools import get_func_ranges_cpp, fname_filter_cpp -from persper.graphs.ruby_tools import get_func_ranges_ruby, fname_filter_ruby -from persper.graphs.git_tools import initialize_repo, get_contents -from persper.graphs.processor import Processor -from persper.graphs.write_graph_to_dot import write_G_to_dot_with_pr - -parser = argparse.ArgumentParser( - description="Draw commit graph for a git repository") -parser.add_argument('repo', type=str, - help="path to target repo") -parser.add_argument('language', type=str, - help="programming language of the target repo, currently support [cpp, ruby]") -parser.add_argument('--output', type=str, - help="output path of generated svg, default as working directory") - -class Language(Enum): - CPP = 1 - RUBY = 2 - -def add_edge(G, sp, ep, func_id): - if ep in G[sp]: - G[sp][ep]['func_ids'].append(func_id) - else: - G.add_edge(sp, ep, func_ids=[func_id]) - -def intersected(a, b): - return a[0] <= b[0] <= a[1] or b[0] <= a[0] <= b[1] - -def get_modified_func_ids(func_ranges, modified_intervals, func_ids): - search_ptr = 0 - num_intervals = len(modified_intervals) - modified_func_ids = [] - for func_r, func_id in zip(func_ranges, func_ids): - for i in range(search_ptr, num_intervals): - if intersected(func_r, modified_intervals[i]): - - modified_func_ids.append(func_id) - search_ptr = i - break - return modified_func_ids - -class CommitGraph(Processor): - - def __init__(self, repo_path, 
language_str): - super().__init__(repo_path) - language = Language[language_str] - if language == Language.CPP: - self.fname_filter = fname_filter_cpp - self.func_extractor = get_func_ranges_cpp - elif language == Language.RUBY: - self.fname_filter = fname_filter_ruby - self.func_extractor = get_func_ranges_ruby - else: - print("This language is not supported yet!") - - def start_process(self): - self.G = nx.DiGraph() - self.func_commit = {} - - def start_process_commit(self, commit): - self.G.add_node(commit.hexsha) - - def on_add(self, diff, commit): - fname = diff.b_blob.path - sha = commit.hexsha - if self.fname_filter(fname): - file_contents = get_contents(self.repo, commit, fname) - func_ids, _ = self.func_extractor(file_contents, fname) - for func_id in func_ids: - self.func_commit[func_id] = sha - - def on_delete(self, diff, commit): - fname = diff.a_blob.path - sha = commit.hexsha - if self.fname_filter(fname): - last_commit = commit.parents[0] - file_contents = get_contents(self.repo, last_commit, fname) - func_ids, _ = self.func_extractor(file_contents, fname) - for func_id in func_ids: - if func_id in self.func_commit: - add_edge(self.G, sha, self.func_commit[func_id], func_id) - del self.func_commit[func_id] - - def on_rename(self, diff, commit): - # when similarity is 100%, diff.a_blob and diff.b_blob are None, so don't use them - new_fname = diff.rename_to - old_fname = diff.rename_from - last_commit = commit.parents[0] - sha = commit.hexsha - - if self.fname_filter(new_fname) or self.fname_filter(old_fname): - file_contents = get_contents(self.repo, last_commit, old_fname) - func_ids, func_ranges = self.func_extractor(file_contents, old_fname) - try: - modified_intervals = parse_patch(diff.diff.decode("utf-8")) - except UnicodeDecodeError: - print("UnicodeDecodeError Found in change_type {}".format(diff.change_type)) - return -1 - modified_func_ids = get_modified_func_ids(func_ranges, modified_intervals, func_ids) - for func_id in modified_func_ids: 
- if func_id in self.func_commit: - add_edge(self.G, sha, self.func_commit[func_id], func_id) - self.func_commit[func_id] = sha - - def on_modify(self, diff, commit): - assert diff.b_blob.path == diff.a_blob.path - fname = diff.b_blob.path - last_commit = commit.parents[0] - sha = commit.hexsha - - if self.fname_filter(fname): - file_contents = get_contents(self.repo, last_commit, fname) - func_ids, func_ranges = self.func_extractor(file_contents, fname) - try: - modified_intervals = parse_patch(diff.diff.decode("utf-8")) - except UnicodeDecodeError: - print("UnicodeDecodeError Found in change_type {}".format(diff.change_type)) - return -1 - modified_func_ids = get_modified_func_ids(func_ranges, modified_intervals, func_ids) - - for func_id in modified_func_ids: - if func_id in self.func_commit: - add_edge(self.G, sha, self.func_commit[func_id], func_id) - self.func_commit[func_id] = sha - - -def draw_commit_graph(repo_path, language, output_path=None, num_commits=None): - repo_name = os.path.basename(repo_path) - cg = CommitGraph(repo_path, language) - cg.process(from_beginning=True, num_commits=num_commits) - pr = nx.pagerank(cg.G, alpha=0.85) - write_G_to_dot_with_pr(cg.G, pr, repo_name + ".dot", edge_attrib="func_ids") - subprocess.call('dot -Tsvg {}.dot -o {}.svg'.format(repo_name, repo_name), shell=True) - -def main(): - args = parser.parse_args() - draw_commit_graph(args['repo'], args['language']) - -if __name__ == '__main__': - main() diff --git a/persper/graphs/cpp.py b/persper/graphs/cpp.py deleted file mode 100644 index 4e6cd59696e..00000000000 --- a/persper/graphs/cpp.py +++ /dev/null @@ -1,3 +0,0 @@ -from persper.graphs.c import CGraphServer - -CPPGraphServer = CGraphServer diff --git a/persper/graphs/cpp_tools.py b/persper/graphs/cpp_tools.py deleted file mode 100644 index ed96ae8e8c8..00000000000 --- a/persper/graphs/cpp_tools.py +++ /dev/null @@ -1,38 +0,0 @@ -import re -import subprocess -from git import Repo -import networkx as nx - -from 
persper.graphs.write_graph_to_dot import write_G_to_dot_with_pr - -def get_func_ranges_cpp(src, fname): - re_signature = re.compile("""^(?P\w+(\s*[\*\&])?)\s+ - ((?P\w+)::)? - (?P\w+)\s* - \([^;]+$ - """, re.VERBOSE ) - func_ids = [] - func_ranges = [] - ptr = -1 - num_lines = 0 - for lineno, line in enumerate(src.split('\n'), 1): - num_lines += 1 - m = re_signature.search(line) - if m: - d = m.groupdict() - if d['class_name']: - func_ids.append('{}::{}'.format(d['class_name'], d['func_name'])) - else: - func_ids.append(d['func_name']) - if ptr != -1: - func_ranges.append([ptr, lineno - 1]) - ptr = lineno - if ptr != -1: - func_ranges.append([ptr, num_lines]) - - return func_ids, func_ranges - -def fname_filter_cpp(fname): - return fname.endswith('.cc') or fname.endswith('.cpp') - - diff --git a/persper/graphs/detect_change.py b/persper/graphs/detect_change.py deleted file mode 100644 index 14826f65443..00000000000 --- a/persper/graphs/detect_change.py +++ /dev/null @@ -1,70 +0,0 @@ -def get_intersected_length(a, b): - """ - >>> get_intersected_length([1, 9], [2, 8]) - 7 - >>> get_intersected_length([2, 8], [1, 9]) - 7 - >>> get_intersected_length([1, 4], [1, 5]) - 4 - >>> get_intersected_length([2, 10], [4, 11]) - 7 - """ - start = a[0] if a[0] >= b[0] else b[0] - end = a[1] if a[1] <= b[1] else b[1] - if start > end: - return 0 - else: - return end - start + 1 - - -def get_changed_functions(func_names, func_ranges, additions, deletions, - separate=False): - """ - Args: - func_names: A list of function names, - usually extracted from old src file, - so new functions aren't included. - func_ranges: A sorted list of function ranges - in the same order of func_names. - additions: A list of pair of integers, - deletions: A list of pair of integers, - separate: A boolean flag, if set to True, additions and deletions are - reported separately. - - Returns: - A dictionary where keys are function names and values are - number of lines edited. 
- """ - info = {} - - if (func_names is None or func_ranges is None or - additions is None or deletions is None): - return info - - def update_info(fn, num_lines, key): - """key should be one of 'adds' or 'dels'.""" - if fn in info: - info[fn][key] += num_lines - else: - info[fn] = {'adds': 0, 'dels': 0} - info[fn][key] = num_lines - - add_ptr, del_ptr = 0, 0 - num_adds, num_dels = len(additions), len(deletions) - for fn, fr in zip(func_names, func_ranges): - for i in range(add_ptr, num_adds): - if fr[0] <= additions[i][0] <= fr[1]: - update_info(fn, additions[i][1], 'adds') - add_ptr = i + 1 - - for j in range(del_ptr, num_dels): - inter_length = get_intersected_length(fr, deletions[j]) - if inter_length > 0: - update_info(fn, inter_length, 'dels') - del_ptr = j - - if not separate: - for fn in info: - info[fn] = info[fn]['adds'] + info[fn]['dels'] - - return info diff --git a/persper/graphs/devrank.py b/persper/graphs/devrank.py deleted file mode 100644 index 3977d7c279d..00000000000 --- a/persper/graphs/devrank.py +++ /dev/null @@ -1,58 +0,0 @@ -import numpy as np -from numpy import linalg as LA -from scipy.sparse import coo_matrix - - -def devrank(G, count_self=False, alpha=0.85, epsilon=1e-5, max_iters=300): - """Memory efficient DevRank using scipy.sparse""" - ni = {} - for i, u in enumerate(G): - ni[u] = i - - def sizeof(u): - return G.node[u]['num_lines'] - - num_nodes = len(G.nodes()) - row, col, data = [], [], [] - for u in G: - num_out_edges = len(G[u]) - if num_out_edges > 0: - total_out_sizes = 0 - for v in G[u]: - total_out_sizes += sizeof(v) - if count_self: - total_out_sizes += sizeof(u) - row.append(ni[u]) - col.append(ni[u]) - data.append(sizeof(u) / total_out_sizes) - for v in G[u]: - row.append(ni[v]) - col.append(ni[u]) - data.append(sizeof(v) / total_out_sizes) - - P = coo_matrix((data, (row, col)), shape=(num_nodes, num_nodes)).tocsr() - - universe_size = 0 - for u in G: - universe_size += sizeof(u) - - p = np.empty(num_nodes) - for u in G: - 
p[ni[u]] = sizeof(u) / universe_size - - v = np.ones(num_nodes) / num_nodes - - for i in range(max_iters): - new_v = alpha * P.dot(v) - gamma = LA.norm(v, 1) - LA.norm(new_v, 1) - new_v += gamma * p - delta = LA.norm(new_v - v, 1) - if delta < epsilon: - break - v = new_v - - pr = {} - for u in G: - pr[u] = v[ni[u]] - - return pr diff --git a/persper/graphs/git_tools.py b/persper/graphs/git_tools.py deleted file mode 100644 index 716f80e3156..00000000000 --- a/persper/graphs/git_tools.py +++ /dev/null @@ -1,32 +0,0 @@ -from git.exc import InvalidGitRepositoryError, NoSuchPathError -from git import Repo -import sys - -EMPTY_TREE_SHA = '4b825dc642cb6eb9a060e54bf8d69288fbee4904' - - -def _diff_with_first_parent(commit): - if len(commit.parents) == 0: - prev_commit = EMPTY_TREE_SHA - else: - prev_commit = commit.parents[0] - # commit.diff automatically detect renames - return commit.diff(prev_commit, - create_patch=True, R=True, indent_heuristic=True) - - -def initialize_repo(repo_path): - try: - repo = Repo(repo_path) - except InvalidGitRepositoryError as e: - print("Invalid Git Repository!") - sys.exit(-1) - except NoSuchPathError as e: - print("No such path error!") - sys.exit(-1) - return repo - - -def get_contents(repo, commit, path): - """Get contents of a path within a specific commit""" - return repo.git.show('{}:{}'.format(commit.hexsha, path)) diff --git a/persper/graphs/graph_server.py b/persper/graphs/graph_server.py deleted file mode 100644 index 6acbaf26aed..00000000000 --- a/persper/graphs/graph_server.py +++ /dev/null @@ -1,81 +0,0 @@ -from abc import ABC -from abc import abstractmethod - -JS_FILENAME_REGEXES = [ - r'.+\.js$', - r'^(?!dist/).+', - r'^(?!test(s)?/).+', - r'^(?!spec/).+', - r'^(?!build/).+', - r'^(?!bin/).+', - r'^(?!doc(s)?/).+' -] - -# todo(hezheng) consider moving these regexes to their corresponding language file -C_FILENAME_REGEXES = [ - r'.+\.(h|c)$' -] - -# 
http://gcc.gnu.org/onlinedocs/gcc-4.4.1/gcc/Overall-Options.html#index-file-name-suffix-71 -CPP_FILENAME_REGEXES = { - r'.+\.(c|cc|cxx|cpp|CPP|c\+\+|C|hh|hpp|Hpp|h\+\+|H)$' -} - - -class GraphServer(ABC): - - @abstractmethod - def update_graph(self, old_filename: str, old_src: str, new_filename: str, new_src: str, patch: bytes): - """ - Update the graph with a single-file patch - :param old_filename: the path to a file that the commit modifies - :param old_src: the source code of the file before the commit - :param new_filename: the path to the file after the commit - :param new_src: the source code of the file after the commit - :param patch: the raw patch generated by GitPython diff - :return: a function-ID-to-changed-LOCs mapping and an old-function-ID-to-new-function-ID mapping - """ - pass - - @abstractmethod - def parse(self, old_filename: str, old_src: str, new_filename: str, new_src: str, patch: bytes): - """ - Parse a single-file patch without updating the graph - :param old_filename: the path to a file that the commit modifies - :param old_src: the source code of the file before the commit - :param new_filename: the path to the file after the commit - :param new_src: the source code of the file after the commit - :param patch: the raw patch generated by GitPython diff - :return: a function-ID-to-changed-LOCs mapping and an old-function-ID-to-new-function-ID mapping - """ - pass - - @abstractmethod - def get_graph(self): - """ - Retrieve the graph - :return: A NetworkX graph object - """ - pass - - @abstractmethod - def reset_graph(self): - """Reset the graph discarding all data""" - pass - - @abstractmethod - def filter_file(self, filename): - """ - Check if the file should be filtered out - :param filename: the path of the file to check - :return: True if the file should be selected; False otherwise. 
- """ - pass - - @abstractmethod - def config(self, param: dict): - """ - One-time configuration of the server for following calls - :param param: key-value pairs of configuration - """ - pass diff --git a/persper/graphs/graph_server_http.py b/persper/graphs/graph_server_http.py deleted file mode 100644 index 5bea5ff0f39..00000000000 --- a/persper/graphs/graph_server_http.py +++ /dev/null @@ -1,54 +0,0 @@ -from networkx.readwrite import json_graph -from persper.graphs.graph_server import GraphServer -import re -import requests -import urllib.parse - - -class GraphServerHttp(GraphServer): - def __init__(self, server_addr, filename_regex_strs): - self.server_addr = server_addr - self.filename_regexes = [re.compile(regex_str) for regex_str in filename_regex_strs] - self.config_param = dict() - - def update_graph(self, old_filename, old_src, new_filename, new_src, patch): - payload = {'oldFname': old_filename, - 'oldSrc': old_src, - 'newFname': new_filename, - 'newSrc': new_src, - 'patch': patch.decode('utf-8', 'replace'), - 'config': self.config_param} - - update_url = urllib.parse.urljoin(self.server_addr, '/update') - r = requests.post(update_url, json=payload).json() - return r['idToLines'], r['idMap'] - - def parse(self, old_filename, old_src, new_filename, new_src, patch): - payload = {'oldFname': old_filename, - 'oldSrc': old_src, - 'newFname': new_filename, - 'newSrc': new_src, - 'patch': patch.decode('utf-8', 'replace'), - 'config': self.config_param} - - stats_url = urllib.parse.urljoin(self.server_addr, '/stats') - r = requests.get(stats_url, json=payload).json() - return r['idToLines'], r['idMap'] - - def get_graph(self): - graph_url = self.server_addr + '/callgraph' - r = requests.get(graph_url) - return json_graph.node_link_graph(r.json()) - - def reset_graph(self): - reset_url = urllib.parse.urljoin(self.server_addr, '/reset') - requests.post(reset_url) - - def filter_file(self, filename): - for regex in self.filename_regexes: - if not 
regex.match(filename): - return False - return True - - def config(self, param): - self.config_param = param diff --git a/persper/graphs/inverse_diff.py b/persper/graphs/inverse_diff.py deleted file mode 100644 index 24274c1c6eb..00000000000 --- a/persper/graphs/inverse_diff.py +++ /dev/null @@ -1,41 +0,0 @@ - -def inverse_diff(adds, dels): - """ - >>> adds = [[11, 1], [32, 1]] - >>> dels = [[11, 11], [31, 32]] - >>> _inverse_diff_result(adds, dels) - ([[10, 1], [30, 2]], [[11, 11], [31, 31]]) - """ - diff = 0 - add_ptr, del_ptr = 0, 0 - num_adds, num_dels = len(adds), len(dels) - inv_adds, inv_dels = [], [] - - def _handle_a(a): - nonlocal diff - inv_dels.append([diff + a[0] + 1, diff + a[0] + a[1]]) - diff += a[1] - - def _handle_d(d): - nonlocal diff - inv_adds.append([diff + d[0] - 1, d[1] - d[0] + 1]) - diff -= (d[1] - d[0] + 1) - - while add_ptr < num_adds or del_ptr < num_dels: - if add_ptr < num_adds and del_ptr < num_dels: - if adds[add_ptr][0] < dels[del_ptr][0]: - _handle_a(adds[add_ptr]) - add_ptr += 1 - else: - _handle_d(dels[del_ptr]) - del_ptr += 1 - elif add_ptr < num_adds and del_ptr >= num_dels: - # we have finished dels - _handle_a(adds[add_ptr]) - add_ptr += 1 - else: - # we have finished adds - _handle_d(dels[del_ptr]) - del_ptr += 1 - - return inv_adds, inv_dels diff --git a/persper/graphs/iterator.py b/persper/graphs/iterator.py deleted file mode 100644 index 2a93ba09940..00000000000 --- a/persper/graphs/iterator.py +++ /dev/null @@ -1,186 +0,0 @@ -import time -from persper.graphs.git_tools import initialize_repo -from collections import deque - - -class RepoIterator(): - - def __init__(self, repo_path): - self.repo_path = repo_path - self.repo = initialize_repo(repo_path) - self.visited = set() - self.last_processed_commit = None - - def iter(self, rev=None, - from_beginning=False, - num_commits=None, - continue_iter=False, - end_commit_sha=None, - into_branches=False, - max_branch_length=100, - min_branch_date=None): - """ - This function 
supports four ways of specifying the - range of commits to return: - - Method 1: rev - Pass `rev` parameter and set both - `from_beginning` and `continue_iter` to False. - `rev` is the revision specifier which follows - an extended SHA-1 syntax. Please refer to git-rev-parse - for viable options. `rev' should only include commits - on the master branch. - - Method 2: from_beginning & num_commits (optional) - Set `from_beginning` to True and - pass `num_commits` parameter. Using this - method, the function will start from the - very first commit on the master branch and - process the following `num_commits` commits - (also on the master branch). - - Method 3: continue_iter & num_commits - Set `continue_iter` to True and pass - `num_commits` parameter. Using this method, the - function will resume processing from succeeding commit of - `self.last_processed_commit` for `num_commits` commits. - - Method 4: continue_iter & end_commit_sha - Set `continue_iter` to True and pass - `end_commit_sha` parameter. The range of continued processing - will be `self.last_processed_commit.hexsha..end_commit_sha`. - - Args: - rev: A string, see above. - num_commits: An int, see above. - from_beginning: A boolean flag, see above. - continue_iter: A boolean flag, see above. - end_commit_sha: A string, see above. - into_branches: A boolean flag. - max_branch_length: An int, the maximum number of commits - to trace back before abortion. - min_branch_date: A python time object, stop backtracing if - a commit is authored before this time. - """ - commits = [] - branch_commits = [] - - if not continue_iter: - self.reset_state() - - # Method 2 - if from_beginning: - commits = list(self.repo.iter_commits(first_parent=True)) - if num_commits is not None: - commits = commits[-num_commits:] - - elif continue_iter: - if not self.last_processed_commit: - print("No history exists yet, terminated.") - return [], [] - - # Method 4 - if end_commit_sha: - rev = self.last_processed_commit.hexsha + '..' 
+ end_commit_sha - commits = list(self.repo.iter_commits( - rev, first_parent=True)) - # Method 3 - elif num_commits: - # some project's main branch might not be master, thus use HEAD - rev = self.last_processed_commit.hexsha + '..HEAD' - commits = list(self.repo.iter_commits( - rev, first_parent=True))[-num_commits:] - else: - print("Both end_commit_sha and num_commits are None.") - return [], [] - - else: - # Method 1 - commits = list(self.repo.iter_commits(rev, first_parent=True)) - - # set self.last_processed_commit - if len(commits) > 0: - self.last_processed_commit = commits[0] - else: - print("The range specified is empty, terminated.") - return [], [] - - for commit in reversed(commits): - self.visited.add(commit.hexsha) - - if into_branches: - # find all merge commits - start_points = deque() - for commit in reversed(commits): - if len(commit.parents) > 1: - for pc in commit.parents[1:]: - start_points.append(pc) - - self.branch_lengths = [] - - while len(start_points) > 0: - cur_commit = start_points.popleft() - branch_length = 0 - - while True: - - # stop tracing back along this branch - # if cur_commit has been visited - if cur_commit.hexsha in self.visited: - break - - # stop if we have reached time boundary - authored_date = time.gmtime(cur_commit.authored_date) - if min_branch_date and min_branch_date > authored_date: - break - - # stop if we have reached max_branch_length - if branch_length >= max_branch_length: - print("WARNING: MAX_BRANCH_LENGTH reached.") - break - - self.visited.add(cur_commit.hexsha) - branch_commits.append(cur_commit) - branch_length += 1 - - # stop if we have reached the very first commit - if len(cur_commit.parents) == 0: - break - - # add to queue if cur_commit is a merge commit - if len(cur_commit.parents) > 1: - for pc in cur_commit.parents[1:]: - start_points.append(pc) - - # get next commit - cur_commit = cur_commit.parents[0] - - if branch_length > 0: - self.branch_lengths.append(branch_length) - - return commits, 
branch_commits - - def reset_state(self): - self.visited = set() - self.last_processed_commit = None - - def __getstate__(self): - state = {} - state['repo_path'] = self.repo_path - state['visited'] = self.visited - # Avoid directly pickle Commit object - if self.last_processed_commit is None: - state['last_processed_sha'] = None - else: - state['last_processed_sha'] = self.last_processed_commit.hexsha - return state - - def __setstate__(self, state): - self.repo_path = state['repo_path'] - self.visited = state['visited'] - self.repo = initialize_repo(state['repo_path']) - if state['last_processed_sha'] is None: - self.last_processed_commit = None - else: - self.last_processed_commit = self.repo.commit( - state['last_processed_sha']) diff --git a/persper/graphs/pagerank.py b/persper/graphs/pagerank.py deleted file mode 100644 index 186ea7aa9f9..00000000000 --- a/persper/graphs/pagerank.py +++ /dev/null @@ -1,43 +0,0 @@ -from numpy import linalg as LA -import numpy as np -from scipy.sparse import coo_matrix - - -def pagerank(G, alpha=0.85, epsilon=1e-5, max_iters=300): - """Memory efficient PageRank using scipy.sparse - This function implements Algo 1. 
in "A Survey on PageRank Computing" - """ - ni = {} - for i, u in enumerate(G): - ni[u] = i - - num_nodes = len(G.nodes()) - - row, col, data = [], [], [] - for u in G: - num_out_edges = len(G[u]) - if num_out_edges > 0: - w = 1 / num_out_edges - for v in G[u]: - row.append(ni[v]) - col.append(ni[u]) - data.append(w) - - P = coo_matrix((data, (row, col)), shape=(num_nodes, num_nodes)).tocsr() - p = np.ones(num_nodes) / num_nodes - v = np.ones(num_nodes) / num_nodes - - for i in range(max_iters): - new_v = alpha * P.dot(v) - gamma = LA.norm(v, 1) - LA.norm(new_v, 1) - new_v += gamma * p - delta = LA.norm(new_v - v, 1) - if delta < epsilon: - break - v = new_v - - pr = {} - for u in G: - pr[u] = v[ni[u]] - - return pr diff --git a/persper/graphs/parse_patch.py b/persper/graphs/parse_patch.py deleted file mode 100644 index 8cb15af3f29..00000000000 --- a/persper/graphs/parse_patch.py +++ /dev/null @@ -1,45 +0,0 @@ -import re - -example_patch = \ -r"""--- Portfile.orig 2011-07-25 18:52:12.000000000 -0700 -+++ Portfile 2011-07-25 18:53:35.000000000 -0700 -@@ -2,7 +2,7 @@ - PortSystem 1.0 - name foo - --version 1.3.0 -+version 1.4.0 - categories net - maintainers nomaintainer - description A network monitoring daemon. 
-@@ -13,9 +13,9 @@ - - homepage http://rsug.itd.umich.edu/software/${name} - - master_sites ${homepage}/files/ --checksums rmd160 f0953b21cdb5eb327e40d4b215110b71 -+checksums rmd160 01532e67a596bfff6a54aa36face26ae - extract.suffix .tgz - platforms darwin""" - -def parse_patch(text): - """Parse the content of a patch string and return a list of modified intervals - - >>> parse_patch(example_patch) - [[2, 8], [13, 21]] - """ - re_chunk_header = re.compile("""\@\@\s* - \-(?P\d+),(?P\d+)\s* - \+(?P\d+),(?P\d+)\s* - \@\@ - """, re.VERBOSE) - modified_intervals = [] - for m in re_chunk_header.finditer(text): - old_start_line, old_num_lines, _, _ = m.groups() - modified_intervals.append([int(old_start_line), int(old_start_line) + int(old_num_lines) - 1]) - - return modified_intervals - -if __name__ == "__main__": - import doctest - doctest.testmod() \ No newline at end of file diff --git a/persper/graphs/patch_parser.py b/persper/graphs/patch_parser.py deleted file mode 100644 index b021741b584..00000000000 --- a/persper/graphs/patch_parser.py +++ /dev/null @@ -1,87 +0,0 @@ -import re - - -class PatchParser(): - - def __init__(self): - self.re_chunk_header = re.compile("""\@\@\s* - \-(?P\d+)(,(?P\d+))?\s* - \+(?P\d+)(,(?P\d+))?\s* - \@\@ - """, re.VERBOSE) - - def clean(self): - self.additions = [] - self.deletions = [] - self.in_add, self.in_del = False, False - self.in_chunk = False - - self.add_start, self.del_start = None, None - self.add_num_lines = None - self.cur = None - - def start_add(self): - self.in_add = True - self.add_start = self.cur - 1 - self.add_num_lines = 1 - - def start_del(self): - self.in_del = True - self.del_start = self.cur - - def finish_add(self): - self.in_add = False - self.additions.append([self.add_start, self.add_num_lines]) - - def finish_del(self): - self.in_del = False - self.deletions.append([self.del_start, self.cur - 1]) - - def parse(self, text): - self.clean() - for line in text.split('\n'): - line = line.strip() - if not 
self.in_chunk: - if line.startswith('@@'): - self.in_chunk = True - else: - continue - - if line.startswith('@@'): - m = self.re_chunk_header.search(line) - self.cur = max(int(m.groups()[0]), 1) - elif line.startswith('-'): - # print("in minus") - if self.in_add: - self.finish_add() - self.start_del() - elif self.in_del: - pass - else: - self.start_del() - self.cur += 1 # always increment in minus - elif line.startswith('+'): - # print("in plus") - if self.in_add: - self.add_num_lines += 1 - elif self.in_del: - self.finish_del() - self.start_add() - else: - self.start_add() - else: - # print("in blank") - if self.in_add: - self.finish_add() - elif self.in_del: - self.finish_del() - else: - pass - self.cur += 1 # always increment in blank - - if self.in_add: - self.finish_add() - elif self.in_del: - self.finish_del() - - return self.additions, self.deletions diff --git a/persper/graphs/processor.py b/persper/graphs/processor.py deleted file mode 100644 index 69bc615c85b..00000000000 --- a/persper/graphs/processor.py +++ /dev/null @@ -1,375 +0,0 @@ -import os -import time -import pickle -from persper.graphs.git_tools import initialize_repo -from collections import deque -import functools -print = functools.partial(print, flush=True) - -EMPTY_TREE_SHA = '4b825dc642cb6eb9a060e54bf8d69288fbee4904' - - -def _diff_with_first_parent(commit): - if len(commit.parents) == 0: - prev_commit = EMPTY_TREE_SHA - else: - prev_commit = commit.parents[0] - # commit.diff automatically detect renames - return commit.diff(prev_commit, - create_patch=True, R=True, indent_heuristic=True) - - -def _fill_change_type(diff_index): - for diff in diff_index: - if diff.new_file: - diff.change_type = 'A' - elif diff.deleted_file: - diff.change_type = 'D' - elif diff.renamed: - diff.change_type = 'R' - elif (diff.a_blob and diff.b_blob and - (diff.a_blob != diff.b_blob)): - diff.change_type = 'M' - else: - diff.change_type = 'U' - - -def _print_diff_index(diff_index): - print(" 
".join([diff.change_type for diff in diff_index])) - - -def _subject(msg): - return msg.split('\n', 1)[0].lstrip().rstrip() - - -class Processor(): - - def __init__(self, repo_path): - self.repo_path = repo_path - self.repo = initialize_repo(repo_path) - self.visited = set() - self.last_processed_commit = None - - def process(self, rev=None, - from_beginning=False, num_commits=None, - from_last_processed=False, end_commit_sha=None, - into_branches=False, - max_branch_length=100, - min_branch_date=None, - checkpoint_interval=100, - skip_work=False, - verbose=True): - """ - This function supports four ways of specifying the - range of commits to process: - - Method 1: rev - Pass `rev` parameter and set both - `from_beginning` and `from_last_processed` to False. - `rev` is the revision specifier which follows - an extended SHA-1 syntax. Please refer to git-rev-parse - for viable options. `rev' should only include commits - on the master branch. - - Method 2: from_beginning & num_commits (optional) - Set `from_beginning` to True and - pass `num_commits` parameter. Using this - method, the function will start from the - very first commit on the master branch and - process the following `num_commits` commits - (also on the master branch). - - Method 3: from_last_processed & num_commits - Set `from_last_processed` to True and pass - `num_commits` parameter. Using this method, the - function will resume processing from succeeding commit of - `self.last_processed_commit` for `num_commits` commits. - - Method 4: from_last_processed & end_commit_sha - Set `from_last_processed` to True and pass - `end_commit_sha` parameter. The range of continued processing - will be `self.last_processed_commit.hexsha..end_commit_sha`. - - Args: - rev: A string, see above. - num_commits: An int, see above. - from_beginning: A boolean flag, see above. - from_last_processed: A boolean flag, see above. - end_commit_sha: A string, see above. 
- into_branches: A boolean flag, if True, the process function - will operate in two phases. - - In the first phase, a call commit graph is contructed - by traversing the specified range of commits on the master - branch. Merge commits are detected and recorded if the - start commit (on master) and end/merge commit of the - corresponding branch are both within the range of - traversal. Those recorded merge commits do not - get any credits (thus they are not present in - self.history data structure). - - In the second phase, it traverses all the branches detected - in the first phase and assign them due credits. - - max_branch_length: An int, the maximum number of commits - to trace back before abortion. - min_branch_date: A python time object, stop backtracing if - a commit is authored before this time. - checkpoint_interval: An int. - """ - if not from_last_processed: - self._reset_state() - self.merge_commits = deque() - - # Method 2 - if from_beginning: - if num_commits is None: - num_commits = 0 - self.commits = list( - self.repo.iter_commits(first_parent=True))[-num_commits:] - - elif from_last_processed: - if not self.last_processed_commit: - print("No history exists yet, terminated.") - return - - # Method 4 - if end_commit_sha: - rev = self.last_processed_commit.hexsha + '..' 
+ end_commit_sha - self.commits = list(self.repo.iter_commits( - rev, first_parent=True)) - # Method 3 - elif num_commits: - rev = self.last_processed_commit.hexsha + '..master' - self.commits = list(self.repo.iter_commits( - rev, first_parent=True))[-num_commits:] - else: - print("Both end_commit_sha and num_commits are None.") - return - - else: - # Method 1 - self.commits = list(self.repo.iter_commits(rev, first_parent=True)) - - if len(self.commits) > 0: - self.last_processed_commit = self.commits[0] - else: - print("The range specified is empty, terminated.") - return - - counter = 1 - start = time.time() - - # 1st phase - for commit in reversed(self.commits): - sha = commit.hexsha - self.visited.add(sha) - self._start_process_commit(commit) - - if verbose: - print('------ No.{} {} {} {} ------'.format( - counter, sha, _subject(commit.message), - time.strftime( - "%b %d %Y", time.gmtime(commit.authored_date) - )) - ) - else: - print('------ No.{} {} ------'.format(counter, sha)) - if counter % 100 == 0: - print('------ Used time: {} ------'.format( - time.time() - start)) - - if counter % checkpoint_interval == 0: - repo_name = os.path.basename(self.repo_path.rstrip('/')) - self.save(repo_name + '-1st-' + str(counter) + '.pickle') - - if into_branches: - is_merge_commit = len(commit.parents) > 1 - if is_merge_commit: - self.merge_commits.append(commit) - """ - is_merge_commit = self._detect_branch( - commit, max_branch_length, min_branch_date) - """ - else: - is_merge_commit = False - - if not skip_work: - # generate diff_index by diff commit with its first parent - diff_index = _diff_with_first_parent(commit) - - # figure out the change type of each entry in diff_index - _fill_change_type(diff_index) - - if verbose: - _print_diff_index(diff_index) - - for diff in diff_index: - if diff.change_type == 'U': - print('Unknown change type encountered.') - continue - - if diff.change_type == 'A': - self.on_add(diff, commit, is_merge_commit) - - elif diff.change_type 
== 'D': - self.on_delete(diff, commit, is_merge_commit) - - elif diff.change_type == 'R': - self.on_rename(diff, commit, is_merge_commit) - - else: - self.on_modify(diff, commit, is_merge_commit) - - counter += 1 - - # 2nd phase - if into_branches: - - commit_cnt = 1 - branch_cnt = 1 - start = time.time() - - print('\n------- 2nd phase -------\n') - - while len(self.merge_commits) > 0: - mc = self.merge_commits.popleft() - cur_commit = mc.parents[1] - branch_length = 0 - valid_branch = False - - while True: - - # stop tracing back along this branch - # if cur_commit has been visited - if cur_commit.hexsha in self.visited: - break - - # stop if we have reached time boundary - authored_date = time.gmtime(cur_commit.authored_date) - if min_branch_date and min_branch_date > authored_date: - break - - # stop if we have reached max_branch_length - if branch_length >= max_branch_length: - break - - # stop if we have reached the very first commit - if len(cur_commit.parents) == 0: - break - - # will process at least one commit for this branch - valid_branch = True - - # process this commit - if verbose: - print('------ Commit No.{} '.format(commit_cnt), - 'Branch No.{} {} {} {} ------'.format( - branch_cnt, - cur_commit.hexsha, - _subject(cur_commit.message), - time.strftime( - "%b %d %Y", - time.gmtime(cur_commit.authored_date) - ) - ) - ) - else: - print('------ Commit No.{} '.format(commit_cnt), - 'Branch No.{} {}------'.format( - branch_cnt, cur_commit.hexsha)) - - if commit_cnt % 100 == 0: - print('------ Used time: {} ------'.format( - time.time() - start)) - - if commit_cnt % checkpoint_interval == 0: - repo_name = os.path.basename( - self.repo_path.rstrip('/')) - self.save( - repo_name + '-2nd-' + str(counter) + '.pickle') - - self.visited.add(cur_commit.hexsha) - # add to queue if prev_commit is a merge commit - if len(cur_commit.parents) == 2: - self.merge_commits.append(cur_commit) - - if not skip_work: - self._start_process_commit(cur_commit) - diff_index = 
_diff_with_first_parent(cur_commit) - _fill_change_type(diff_index) - for diff in diff_index: - if diff.change_type == 'U': - print('Unknown change type encountered.') - continue - if diff.change_type == 'A': - self.on_add2(diff, cur_commit) - elif diff.change_type == 'D': - self.on_delete2(diff, cur_commit) - elif diff.change_type == 'R': - self.on_rename2(diff, cur_commit) - else: - self.on_modify2(diff, cur_commit) - - # get next commit - prev_commit = cur_commit.parents[0] - - cur_commit = prev_commit - branch_length += 1 - commit_cnt += 1 - - if valid_branch: - branch_cnt += 1 - - repo_name = os.path.basename(self.repo_path.rstrip('/')) - self.save(repo_name + '-finished.pickle') - - def _reset_state(self): - self.visited = set() - self.last_processed_commit = None - - def _start_process_commit(self, commit): - pass - - def set_repo_path(self, repo_path): - self.repo_path = repo_path - self.repo = initialize_repo(repo_path) - self.last_processed_commit = self.repo.commit(self.last_sha) - - def on_add(self, diff, commit, is_merge_commit): - return 0 - - def on_delete(self, diff, commit, is_merge_commit): - return 0 - - def on_rename(self, diff, commit, is_merge_commit): - return 0 - - def on_modify(self, diff, commit, is_merge_commit): - return 0 - - def on_add2(self, diff, commit): - return 0 - - def on_delete2(self, diff, commit): - return 0 - - def on_rename2(self, diff, commit): - return 0 - - def on_modify2(self, diff, commit): - return 0 - - def __getstate__(self): - state = { - 'visited': self.visited, - 'last_sha': self.last_processed_commit.hexsha - } - return state - - def __setstate__(self, state): - self.__dict__.update(state) - - def save(self, fname): - with open(fname, 'wb+') as f: - pickle.dump(self, f) diff --git a/persper/graphs/ruby_tools.py b/persper/graphs/ruby_tools.py deleted file mode 100644 index ef988a5161a..00000000000 --- a/persper/graphs/ruby_tools.py +++ /dev/null @@ -1,53 +0,0 @@ -import re -import os.path -import subprocess -from 
git import Repo -import networkx as nx - -from persper.graphs.write_graph_to_dot import write_G_to_dot_with_pr - - -def underscore_to_camelcase(value): - def camelcase(): - while True: - yield str.capitalize - - c = camelcase() - return "".join(next(c)(x) if x else '_' for x in value.split("_")) - - -def get_func_ranges_ruby(src, fname): - - def get_prefix(fname): - return fname - - fname = os.path.basename(fname).split('.')[0] - prefix = underscore_to_camelcase(get_prefix(fname)) - - re_def = re.compile("^\s*def\s+(?Pself\.)?(?P\w+\??)\s*\(?.*\)?$") - func_ids = [] - func_ranges = [] - ptr = -1 - num_lines = 0 - for lineno, line in enumerate(src.split('\n'), 1): - num_lines += 1 - m = re_def.search(line) - if m: - d = m.groupdict() - if d['class_method'] or fname.endswith('_helper.rb'): - op = "::" - else: - op = "#" - func_ids.append(prefix + op + d['func_name']) - - if ptr != -1: - func_ranges.append([ptr, lineno - 1]) - ptr = lineno - if ptr != -1: - func_ranges.append([ptr, num_lines]) - - return func_ids, func_ranges - - -def fname_filter_ruby(fname): - return fname.endswith('.rb') diff --git a/persper/graphs/srcml.py b/persper/graphs/srcml.py deleted file mode 100755 index 7b90d9090c5..00000000000 --- a/persper/graphs/srcml.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import shutil -import os -import glob -import subprocess -import tempfile -from lxml import etree - - -def copy_dir(src, dst, *, follow_sym=True): - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - if os.path.isdir(src): - shutil.copyfile(src, dst, follow_symlinks=follow_sym) - shutil.copystat(src, dst, follow_symlinks=follow_sym) - return dst - - -def transform_dir(input_dir, output_dir, extensions=('.c', '.h')): - """Run srcML recursively under a directory - - First copy directory structure from input_dir to output_dir, - then for every source file that ends with ext in extentions, - run srcML and output to corresponding directory 
under output_dir. - """ - # copy directory structure - input_dir = os.path.expanduser(input_dir) - output_dir = os.path.expanduser(output_dir) - if os.path.isdir(output_dir): - shutil.rmtree(output_dir) - shutil.copytree(input_dir, output_dir, copy_function=copy_dir) - - print("Transforming source code to xml...") - counter = 0 - for ext in extensions: - for fname in glob.iglob(input_dir + '/**/*' + ext, recursive=True): - if counter % 100 == 0: - print('Processed {}'.format(counter)) - # linux-kernel/arch/alpha/boot/bootp.c -> arch/alpha/boot/bootp.c - pre = os.path.commonprefix((input_dir, fname)) - rel = os.path.relpath(fname, pre) - output_path = os.path.join(output_dir, rel) + ".xml" - - cmd = 'srcml {} --position -o {}'.format(fname, output_path) - subprocess.call(cmd, shell=True) - - counter += 1 - print("Tranformation completed, {} processed.".format(counter)) - - -def transform_src_to_tree(source_code, ext='.c'): - root = None - try: - f = tempfile.NamedTemporaryFile(mode='wb+', delete=False) - f.write(source_code.encode('utf-8', 'replace')) - f.close() - except UnicodeEncodeError as e: - print("UnicodeEncodeError in transform_src_to_tree!") - if not f.closed: - f.close() - os.remove(f.name) - return None - - # rename so that srcml can open it - new_fname = f.name + ext - os.rename(f.name, new_fname) - xml_path = f.name + ".xml" - cmd = 'srcml {} --position -o {}'.format(new_fname, xml_path) - subprocess.call(cmd, shell=True) - try: - root = etree.parse(xml_path).getroot() - except: - print("Unable to parse xml file!") - finally: - if not f.closed: - f.close() - os.remove(new_fname) - if os.path.exists(xml_path): - os.remove(xml_path) - - return root - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('SOURCE', help='source dir', type=str) - parser.add_argument('OUTPUT', help='output dir', type=str) - args = parser.parse_args() - transform_dir(args.SOURCE, args.OUTPUT) - -if __name__ == '__main__': - main() diff --git 
a/persper/graphs/write_graph_to_dot.py b/persper/graphs/write_graph_to_dot.py deleted file mode 100644 index 1883f66f6a9..00000000000 --- a/persper/graphs/write_graph_to_dot.py +++ /dev/null @@ -1,31 +0,0 @@ -import matplotlib as mpl -import matplotlib.cm as cm - - -def map_to_color(x, m): - color_list = [int(255 * c) for c in m.to_rgba(x)] - return '#%02x%02x%02x%02x' % tuple(color_list) - - -def write_G_to_dot_with_pr(G, pr, fname, edge_attrib=None, header_lines=None): - norm = mpl.colors.Normalize(vmin=min(pr.values()), vmax=max(pr.values())) - cmap = cm.Blues - m = cm.ScalarMappable(norm, cmap=cmap) - with open(fname, 'w+') as f: - f.write('digraph graphname {\n') - if header_lines: - for line in header_lines: - f.write(line) - for n in G.nodes(data=False): - color_str = map_to_color(pr[n], m) - f.write( - '\"%s\" [style=filled fillcolor="%s" tooltip=\"%f\"];\n' % - (n, color_str, pr[n])) - for e in G.edges_iter(data=True): - if edge_attrib: - f.write('\"{}\" -> \"{}\" [ label=\"{}\"];\n'.format( - e[0], e[1], ' '.join(e[2][edge_attrib][:10]))) - else: - f.write('\"{}\" -> \"{}\";\n'.format(e[0], e[1])) - - f.write('}') diff --git a/test/cpp_test_repo/A/Exceptions.h b/test/cpp_test_repo/A/Exceptions.h new file mode 100644 index 00000000000..7b73c769e97 --- /dev/null +++ b/test/cpp_test_repo/A/Exceptions.h @@ -0,0 +1,60 @@ +#pragma once + +#include +#include +#include + +// ????????????û???????????? +class Exception : public std::exception +{ + std::string msg; +public: + // ??????????????????????? + Exception() : msg("?????????????") {} + // ??????????????????????????? + explicit Exception(const std::string& message) : msg(message) + { +#if _DEBUG + std::cerr << "Exception constructed: " << message << std::endl; +#endif + } + ~Exception() noexcept override { } + const char* what() const noexcept override { return msg.c_str(); } +}; + +// ????????????????????????????????????????? 
+class InvalidCastException : public Exception +{ +public: + InvalidCastException(const std::string& message) : Exception(message) { } +}; + +// ????????????????????????? +class ArgumentException : public Exception +{ + static std::string BuildMessage(const std::string& message, const std::string& argumentName) + { + if (argumentName.empty()) return message; + return message + " ????????" + argumentName + "??"; + } + std::string _ArgumentName; +public: + const std::string& ArgumentName() const { return _ArgumentName; } +public: + ArgumentException(const std::string& message) + : Exception(message) { } + ArgumentException(const std::string& message, const std::string& argumentName) + : Exception(BuildMessage(message, argumentName)), _ArgumentName(argumentName) { } +}; + +// ????????????????????????????????????????? +class OperationFailureException : public Exception +{ +private: + int _ErrorCode; +public: + explicit OperationFailureException(int errorCode) + : Exception("???????????????" + std::to_string(errorCode) + "??"), _ErrorCode(errorCode) + { + } +}; diff --git a/test/cpp_test_repo/A/TextFileParsers.cpp b/test/cpp_test_repo/A/TextFileParsers.cpp new file mode 100644 index 00000000000..7eebc791e10 --- /dev/null +++ b/test/cpp_test_repo/A/TextFileParsers.cpp @@ -0,0 +1,160 @@ +#include "stdafx.h" +#include "TextFileParsers.h" + +using namespace std; +using namespace filesystem; + +RowReader& operator>>(RowReader& reader, string& rhs) +{ + // ʹÓÃÖƱí·û·Ö¸ô¡£ + getline(reader.ss, rhs, reader.delim); + // È¥³ý×óÓÒÁ½²àµÄ¿Õ°×¡£ + if (!reader.keepWhitespace) + { + // Left trim + auto wsEndsAt = find_if(rhs.begin(), rhs.end(), [](char c) {return c < 0 || !isspace(c); }); + rhs.erase(rhs.begin(), wsEndsAt); + // Right trim + auto wsStartsAt = find_if(rhs.rbegin(), rhs.rend(), [](char c) {return c < 0 || !isspace(c); }); + rhs.erase(rhs.rbegin().base(), rhs.end()); + } + return reader; +} + +RowReader& operator>>(RowReader& reader, int& rhs) +{ + string buffer{}; + if 
(reader >> buffer) rhs = stoi(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, long& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stol(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, float& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stof(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, double& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stod(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, bool& rhs) +{ + string buffer{}; + if (reader >> buffer) + { + if (Equal(buffer, "true", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + rhs = true; + else if (Equal(buffer, "false", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + rhs = true; + else + { + try + { + auto value = stoi(buffer); + rhs = (value != 0); + } + catch (const exception&) + { + throw invalid_argument("Cannot convert to bool."); + } + } + } + return reader; +} + +void ConfigurationParser::Load(istream& inputStream) +{ + string buffer{}; + stringstream ss{}; + size_t lineNumber = 0; + while (getline(inputStream, buffer)) + { + lineNumber++; + ss.clear(); + ss.str(buffer); + string key{}; + char ch; + if (!(ss >> key)) continue; + if (key[0] == '#') continue; + if (!(ss >> ch) || ch != '=') + throw Exception("ÎÞЧµÄÅäÖÃÐС£ÆÚÍû£º¡°=¡±¡£ÐУº" + to_string(lineNumber) + "¡£"); + string value{}; + if (!(ss >> value)) + throw Exception("ÎÞЧµÄÅäÖÃÐС£ÆÚÍû£ºÅäÖÃÖµ¡£ÐУº" + to_string(lineNumber) + "¡£"); + // ISSUE Ä¿Ç°ÅäÖÃÖµÖв»ÄÜ°üº¬¿Õ¸ñ£¬·ñÔò»áÔÚ¿Õ¸ñ´¦½Ø¶Ï¡£ + entries[key] = value; + } +} + +std::string ConfigurationParser::GetString(const std::string& key, const std::string& defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + return v->second; +} + +int ConfigurationParser::GetInt(const std::string& key, int defaultValue) const +{ + auto v = entries.find(key); + if (v == 
entries.end()) return defaultValue; + try + { + return stoi(v->second); + } catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªint¡£")); + } +} + +double ConfigurationParser::GetDouble(const std::string& key, double defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + try + { + return stod(v->second); + } catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªdouble¡£")); + } +} + +bool ConfigurationParser::GetBool(const std::string& key, bool defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + if (Equal(v->second, "true", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + return true; + else if (Equal(v->second, "false", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + return false; + try + { + return stod(v->second); + } catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªbool¡£")); + } +} + +ConfigurationParser::ConfigurationParser(istream& inputStream) : entries() +{ + Load(inputStream); +} + +ConfigurationParser::ConfigurationParser(path filePath) : entries() +{ + auto ifs = OpenAndValidate(filePath); + Load(ifs); +} diff --git a/test/cpp_test_repo/A/TextFileParsers.h b/test/cpp_test_repo/A/TextFileParsers.h new file mode 100644 index 00000000000..73952b0deda --- /dev/null +++ b/test/cpp_test_repo/A/TextFileParsers.h @@ -0,0 +1,121 @@ +#pragma once +#include +#include "Utility.h" + +// ÓÃÓÚ´ÓÊäÈëÁ÷ÖÐÌáÈ¡Ò»ÐУ¬²¢Êä³ö¡£ +class RowReader +{ +private: + std::stringstream ss; + char delim; + bool keepWhitespace; + std::size_t _LineNumber; +public: + size_t LineNumber() const { return _LineNumber; } + void ResetLineNumber() { _LineNumber = 0; } + const std::stringstream& LineStream() const { return ss; } +public: + operator bool() const + { + return bool(ss); + } + bool operator !() const + 
{ + return !ss; + } + template + friend TStream& operator>>(TStream& s, RowReader& reader); + friend RowReader& operator>>(RowReader& reader, std::string& rhs); +public: + /** + * \brief + * \param delim Áзָô·û + */ + explicit RowReader(bool keepWhitespace = false, char delim = '\t') : ss(), delim(delim), keepWhitespace(keepWhitespace), _LineNumber(0) + { + } +}; + +// ´ÓÊäÈëÁ÷ÖжÁÈëÒ»ÐзǿշÇ×¢ÊÍÐС£ +template +TStream& operator>>(TStream& s, RowReader& reader) +{ + std::string buffer{}; + while (getline(s, buffer)) + { + reader._LineNumber++; + // ¼ì²é´ËÐÐÊÇ·ñΪעÊÍ¡£ + // status + // 0 start/×ó²à¿Õ°× + // 1 # + // 2 ÆäËû×Ö·û + char status = 0; + for (auto& c : buffer) + { + switch (status) + { + case 0: + if (c == '#') + { + status = 1; + goto CHECK_STATUS; + } + if (c < 0 || !isspace(c)) + { + status = 2; + goto CHECK_STATUS; + } + break; + default: + assert(false); + break; + } + } + CHECK_STATUS: + switch (status) + { + case 0: + // ¿Õ°×ÐÐ + break; + case 1: + // ×¢ÊÍÐÐ + break; + case 2: + goto SET_RESULT; + default: + assert(false); + break; + } + } +SET_RESULT: + reader.ss.str(buffer); + reader.ss.clear(); + return s; +} + +RowReader& operator>>(RowReader& reader, std::string& rhs); + +RowReader& operator>>(RowReader& reader, int& rhs); + +RowReader& operator>>(RowReader& reader, long& rhs); + +RowReader& operator>>(RowReader& reader, float& rhs); + +RowReader& operator>>(RowReader& reader, double& rhs); + +RowReader& operator>>(RowReader& reader, bool& rhs); + +class ConfigurationParser +{ +private: + std::unordered_map entries; + void Load(std::istream& inputStream); +public: + std::string GetString(const std::string& key, const std::string& defaultValue) const; + int GetInt(const std::string& key, int defaultValue) const; + double GetDouble(const std::string& key, double defaultValue) const; + bool GetBool(const std::string& key, bool defaultValue) const; +public: + ConfigurationParser(std::istream& inputStream); + ConfigurationParser(std::filesystem::path 
filePath); +}; \ No newline at end of file diff --git a/test/cpp_test_repo/A/TypeTraits.h b/test/cpp_test_repo/A/TypeTraits.h new file mode 100644 index 00000000000..3169ac711d7 --- /dev/null +++ b/test/cpp_test_repo/A/TypeTraits.h @@ -0,0 +1,9 @@ +#pragma once +#include + +template +struct is_flags : std::false_type +{ +}; + +template constexpr bool is_flags_v = is_flags::value; diff --git a/test/cpp_test_repo/A/Utility.cpp b/test/cpp_test_repo/A/Utility.cpp new file mode 100644 index 00000000000..e043e34a718 --- /dev/null +++ b/test/cpp_test_repo/A/Utility.cpp @@ -0,0 +1,76 @@ +#include "stdafx.h" +#include "Utility.h" + +using namespace std; + +#define _DECLARE_ENUM(TYPE, MEMBER) case TYPE::MEMBER : return #MEMBER; +#define _DECLARE_ENUM_DEFAULT(TYPE) default : return string(#TYPE) + "::" + to_string((long)v); + +bool Equal(const string& lhs, const string& rhs, StringComparison comparision) +{ + if (&lhs == &rhs) return true; + size_t pos1 = 0, pos2 = 0; + size_t pos1r = lhs.size(), pos2r = rhs.size(); + if ((comparision & StringComparison::IgnoreSurroudingWhiteSpaces) + == StringComparison::IgnoreSurroudingWhiteSpaces) + { + while (pos1 < lhs.size() && isspace(lhs[pos1])) pos1++; + while (pos2 < lhs.size() && isspace(lhs[pos2])) pos2++; + while (pos1 > 0 && isspace(lhs[pos1 - 1])) pos1--; + while (pos2 > 0 && isspace(lhs[pos2 - 1])) pos2--; + } + if (pos1r - pos1 != pos2r - pos2) return false; + auto ignoreCase = (comparision & StringComparison::IgnoureCase) == StringComparison::IgnoureCase; + while (pos1 < pos1r) + { + if (ignoreCase) + { + if (tolower(lhs[pos1]) != tolower(rhs[pos1])) return false; + } else + { + if (lhs[pos1] != rhs[pos1]) return false; + } + pos1++; + pos2++; + } + return true; +} + +bool Confirm(const std::string& prompt) +{ + cout << prompt << " (Y/N)> " << flush; + while (true) + { + string buffer; + getline(cin, buffer); + stringstream ss(buffer); + if (ss >> buffer) + { + transform(buffer.begin(), buffer.end(), buffer.begin(), [](char 
c) {return tolower(c); }); + if (buffer == "y" || buffer == "yes") return true; + if (buffer == "n" || buffer == "no") return false; + } + cout << "ÎÞЧµÄÊäÈë¡£> " << flush; + } +} + +void ReportException(const exception& ex, int level) +{ + if (level > 0) + { + cerr << "<-"; + for (int i = 0; i < level; i++) cerr << '-'; + cerr << ' '; + } + cerr << "[" << typeid(ex).name() << "] " << ex.what() << endl; + try { + rethrow_if_nested(ex); + } + catch (const exception& subEx) { + ReportException(subEx, level + 1); + } + catch (...) + { + cerr << "[Unknown Exception]" << endl; + } +} diff --git a/test/cpp_test_repo/A/Utility.h b/test/cpp_test_repo/A/Utility.h new file mode 100644 index 00000000000..f618f0d1804 --- /dev/null +++ b/test/cpp_test_repo/A/Utility.h @@ -0,0 +1,187 @@ +#pragma once + +#include +#include "Exceptions.h" +#include +#include "TypeTraits.h" + +#define ANSI_COLOR_RED "\x1b[31m" +#define ANSI_COLOR_GREEN "\x1b[32m" +#define ANSI_COLOR_YELLOW "\x1b[33m" +#define ANSI_COLOR_BLUE "\x1b[34m" +#define ANSI_COLOR_MAGENTA "\x1b[35m" +#define ANSI_COLOR_CYAN "\x1b[36m" + +#define ANSI_COLOR_BRIGHT "\x1b[1m" +#define ANSI_COLOR_RESET "\x1b[0m" + +namespace std { + class type_index; +} + +template +bool dynamic_kind_of(const TSrc* obj) +{ + return dynamic_cast(obj) != nullptr; +} + +template +bool pointer_kind_of(const std::shared_ptr obj) +{ + return std::dynamic_pointer_cast(obj) != nullptr; +} + +template +TDest safe_cast(TSrc obj) +{ + if (obj == nullptr) return nullptr; + auto p = dynamic_cast(obj); + if (p == nullptr) throw InvalidCastException("ָ��������ʱָ������ת������Ч�ġ�"); + return p; +} + +template +std::shared_ptr safe_pointer_cast(const std::shared_ptr& obj) +{ + if (obj == nullptr) return std::shared_ptr(); + auto p = std::dynamic_pointer_cast(obj); + if (p == nullptr) throw InvalidCastException("ָ��������ʱָ������ת������Ч�ġ�"); + return p; +} + +template +std::string StreamStatusToString(const TStream& stream) +{ + std::string status = 
stream.good() ? "good " : ""; + if (stream.eof()) status += "eof "; + if (stream.bad()) status += "bad "; + if (stream.fail()) status += "fail "; + return status; +} + +template +TStream OpenAndValidate(const TPath arg1) +{ + auto fs = TStream(arg1); + if (!fs) { + std::stringstream ss; + ss << "���Դ��ļ�" << arg1 << "ʱ��������" << StreamStatusToString(fs); + throw Exception(ss.str()); + } + return fs; +} + +// �����״̬����ȷ�ԡ��������ȷ����������쳣�� +template +void ValidateStream(const TStream& stream) +{ + if (!stream) { + std::stringstream ss; + ss << "��״̬����" << StreamStatusToString(stream); + throw Exception(ss.str()); + } +} + +// ���ڽ����� map::equal_range �Ⱥ����ķ���ֵת��Ϊ�ɱ� foreach �﷨���ܵĽṹ�� +template +class _RangeToEnumerable +{ + std::pair _Range; +public: + TIterator begin() { return _Range.first; } + TIterator end() { return _Range.second; } + bool empty() { return _Range.first == _Range.second; } + _RangeToEnumerable(const std::pair range) + : _Range(range) + { + + } +}; + +template +_RangeToEnumerable RangeToEnumerable(const std::pair range) +{ + return _RangeToEnumerable(range); +} + +inline std::string to_string(const std::pair& value) +{ + return "[" + value.first + ", " + value.second + "]"; +} + +enum class StringComparison +{ + None = 0, + IgnoreSurroudingWhiteSpaces, + IgnoureCase, +}; + +template<> +struct is_flags : std::true_type +{ + +}; + +bool Equal(const std::string& lhs, const std::string& rhs, StringComparison comparision = StringComparison::None); + +// �������÷�Χö�ٵİ�λ���� +template, int> = 0> +TEnum operator & (TEnum lhs, TEnum rhs) +{ + using T = std::underlying_type_t; + return static_cast(static_cast(lhs) & static_cast(rhs)); +} + +template, int> = 0> +TEnum operator | (TEnum lhs, TEnum rhs) +{ + using T = std::underlying_type_t; + return static_cast(static_cast(lhs) | static_cast(rhs)); +} + +#define _RE_TRACE(iosExpr) //std::cout << "Trace:" << iosExpr << std::endl; + +bool Confirm(const std::string& prompt); + +struct 
ReliabilityNetworkEntry; +const char* FriendlyNameOf(const std::type_index& type); +const char* FriendlyNameOf(const type_info& type); +const char* FriendlyNameOf(const ReliabilityNetworkEntry& instance); +template +const char* FriendlyNameOf() +{ + return FriendlyNameOf(typeid(T)); +} + +// ��RAII�����ڵ��û������뿪ijһ�����ʱ���Զ�ִ��ijЩ�û�����������߼��� +// �÷��� +// ����Ҫ�����߼��Ĵ������ʹ�� +// BlockExitHandler cleanupHandler(....); +// ���ɡ� +// ע�⣺ +// ��Ҫ�������ͷ������������������ֶΡ� +// ��Ҫ�������Ͷ���Ϊ������������Ϊ�ᱻ�������Ż����� +class BlockExitHandler +{ + std::function handler; +public: + explicit BlockExitHandler(const std::function& handler) : handler(handler) + { + + } + BlockExitHandler(const BlockExitHandler&) = delete; + BlockExitHandler& operator=(const BlockExitHandler&) = delete; + ~BlockExitHandler() + { + try + { + handler(); + } catch (std::exception& e) + { + // �����������������쳣�� + std::cout << "BlockExitHandler: " << e.what() << std::endl; + } + } +}; + +void ReportException(const std::exception& ex, int level = 0); diff --git a/test/cpp_test_repo/A/main.cpp b/test/cpp_test_repo/A/main.cpp new file mode 100644 index 00000000000..df9b79355fd --- /dev/null +++ b/test/cpp_test_repo/A/main.cpp @@ -0,0 +1,13 @@ +#include "stdafx.h" +#include "TextFileParsers.h" +#include "Utility.h" + +using namespace std; + +int main(int argc, char* argv[]) +{ + auto ifs = OpenAndValidate("config.txt"); + auto parser = ConfigurationParser(ifs); + cout << parser.GetBool("testBool", false) << endl; + return 0; +} diff --git a/test/cpp_test_repo/A/stdafx.cpp b/test/cpp_test_repo/A/stdafx.cpp new file mode 100644 index 00000000000..1681a386b77 --- /dev/null +++ b/test/cpp_test_repo/A/stdafx.cpp @@ -0,0 +1,7 @@ +// stdafx.cpp : Ö»°üÀ¨±ê×¼°üº¬ÎļþµÄÔ´Îļþ +// stdafx.obj ½«°üº¬Ô¤±àÒëÀàÐÍÐÅÏ¢ + +#include "stdafx.h" + +// TODO: ÔÚ STDAFX.H ÖÐÒýÓÃÈκÎËùÐèµÄ¸½¼ÓÍ·Îļþ£¬ +//¶ø²»ÊÇÔÚ´ËÎļþÖÐÒýÓà diff --git a/test/cpp_test_repo/A/stdafx.h b/test/cpp_test_repo/A/stdafx.h new file mode 
100644 index 00000000000..733498297e0 --- /dev/null +++ b/test/cpp_test_repo/A/stdafx.h @@ -0,0 +1,39 @@ +// stdafx.h : ±ê׼ϵͳ°üº¬ÎļþµÄ°üº¬Îļþ£¬ +// »òÊǾ­³£Ê¹Óõ«²»³£¸ü¸ÄµÄ +// Ìض¨ÓÚÏîÄ¿µÄ°üº¬Îļþ +// + +#pragma once + +// TODO: ÔÚ´Ë´¦ÒýÓóÌÐòÐèÒªµÄÆäËûÍ·Îļþ +// ÊÊÓÃÓÚÈí²âʹÓà +//#define _DOWNGRADED_DEMO +#define _SILENCE_STDEXT_ALLOCATORS_DEPRECATION_WARNING + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// ½«ÊµÑéÐ﵀ filesystem ÃüÃû¿Õ¼äµ¼Èë std ÖС££¨2016£© +namespace std +{ + namespace filesystem = experimental::filesystem::v1; +} + + +using complexd = std::complex; \ No newline at end of file diff --git a/test/cpp_test_repo/B/Exceptions.h b/test/cpp_test_repo/B/Exceptions.h new file mode 100644 index 00000000000..7b73c769e97 --- /dev/null +++ b/test/cpp_test_repo/B/Exceptions.h @@ -0,0 +1,60 @@ +#pragma once + +#include +#include +#include + +// ????????????û???????????? +class Exception : public std::exception +{ + std::string msg; +public: + // ??????????????????????? + Exception() : msg("?????????????") {} + // ??????????????????????????? + explicit Exception(const std::string& message) : msg(message) + { +#if _DEBUG + std::cerr << "Exception constructed: " << message << std::endl; +#endif + } + ~Exception() noexcept override { } + const char* what() const noexcept override { return msg.c_str(); } +}; + +// ????????????????????????????????????????? +class InvalidCastException : public Exception +{ +public: + InvalidCastException(const std::string& message) : Exception(message) { } +}; + +// ????????????????????????? +class ArgumentException : public Exception +{ + static std::string BuildMessage(const std::string& message, const std::string& argumentName) + { + if (argumentName.empty()) return message; + return message + " ????????" 
+ argumentName + "??"; + } + std::string _ArgumentName; +public: + const std::string& ArgumentName() const { return _ArgumentName; } +public: + ArgumentException(const std::string& message) + : Exception(message) { } + ArgumentException(const std::string& message, const std::string& argumentName) + : Exception(BuildMessage(message, argumentName)), _ArgumentName(argumentName) { } +}; + +// ????????????????????????????????????????? +class OperationFailureException : public Exception +{ +private: + int _ErrorCode; +public: + explicit OperationFailureException(int errorCode) + : Exception("???????????????" + std::to_string(errorCode) + "??"), _ErrorCode(errorCode) + { + } +}; diff --git a/test/cpp_test_repo/B/TextFileParsers.cpp b/test/cpp_test_repo/B/TextFileParsers.cpp new file mode 100644 index 00000000000..7eebc791e10 --- /dev/null +++ b/test/cpp_test_repo/B/TextFileParsers.cpp @@ -0,0 +1,160 @@ +#include "stdafx.h" +#include "TextFileParsers.h" + +using namespace std; +using namespace filesystem; + +RowReader& operator>>(RowReader& reader, string& rhs) +{ + // ʹÓÃÖƱí·û·Ö¸ô¡£ + getline(reader.ss, rhs, reader.delim); + // È¥³ý×óÓÒÁ½²àµÄ¿Õ°×¡£ + if (!reader.keepWhitespace) + { + // Left trim + auto wsEndsAt = find_if(rhs.begin(), rhs.end(), [](char c) {return c < 0 || !isspace(c); }); + rhs.erase(rhs.begin(), wsEndsAt); + // Right trim + auto wsStartsAt = find_if(rhs.rbegin(), rhs.rend(), [](char c) {return c < 0 || !isspace(c); }); + rhs.erase(rhs.rbegin().base(), rhs.end()); + } + return reader; +} + +RowReader& operator>>(RowReader& reader, int& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stoi(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, long& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stol(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, float& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stof(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, double& 
rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stod(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, bool& rhs) +{ + string buffer{}; + if (reader >> buffer) + { + if (Equal(buffer, "true", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + rhs = true; + else if (Equal(buffer, "false", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + rhs = true; + else + { + try + { + auto value = stoi(buffer); + rhs = (value != 0); + } + catch (const exception&) + { + throw invalid_argument("Cannot convert to bool."); + } + } + } + return reader; +} + +void ConfigurationParser::Load(istream& inputStream) +{ + string buffer{}; + stringstream ss{}; + size_t lineNumber = 0; + while (getline(inputStream, buffer)) + { + lineNumber++; + ss.clear(); + ss.str(buffer); + string key{}; + char ch; + if (!(ss >> key)) continue; + if (key[0] == '#') continue; + if (!(ss >> ch) || ch != '=') + throw Exception("ÎÞЧµÄÅäÖÃÐС£ÆÚÍû£º¡°=¡±¡£ÐУº" + to_string(lineNumber) + "¡£"); + string value{}; + if (!(ss >> value)) + throw Exception("ÎÞЧµÄÅäÖÃÐС£ÆÚÍû£ºÅäÖÃÖµ¡£ÐУº" + to_string(lineNumber) + "¡£"); + // ISSUE Ä¿Ç°ÅäÖÃÖµÖв»ÄÜ°üº¬¿Õ¸ñ£¬·ñÔò»áÔÚ¿Õ¸ñ´¦½Ø¶Ï¡£ + entries[key] = value; + } +} + +std::string ConfigurationParser::GetString(const std::string& key, const std::string& defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + return v->second; +} + +int ConfigurationParser::GetInt(const std::string& key, int defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + try + { + return stoi(v->second); + } catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªint¡£")); + } +} + +double ConfigurationParser::GetDouble(const std::string& key, double defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + try + { + return stod(v->second); + 
} catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªdouble¡£")); + } +} + +bool ConfigurationParser::GetBool(const std::string& key, bool defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + if (Equal(v->second, "true", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + return true; + else if (Equal(v->second, "false", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + return false; + try + { + return stod(v->second); + } catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªbool¡£")); + } +} + +ConfigurationParser::ConfigurationParser(istream& inputStream) : entries() +{ + Load(inputStream); +} + +ConfigurationParser::ConfigurationParser(path filePath) : entries() +{ + auto ifs = OpenAndValidate(filePath); + Load(ifs); +} diff --git a/test/cpp_test_repo/B/TextFileParsers.h b/test/cpp_test_repo/B/TextFileParsers.h new file mode 100644 index 00000000000..73952b0deda --- /dev/null +++ b/test/cpp_test_repo/B/TextFileParsers.h @@ -0,0 +1,121 @@ +#pragma once +#include +#include "Utility.h" + +// ÓÃÓÚ´ÓÊäÈëÁ÷ÖÐÌáÈ¡Ò»ÐУ¬²¢Êä³ö¡£ +class RowReader +{ +private: + std::stringstream ss; + char delim; + bool keepWhitespace; + std::size_t _LineNumber; +public: + size_t LineNumber() const { return _LineNumber; } + void ResetLineNumber() { _LineNumber = 0; } + const std::stringstream& LineStream() const { return ss; } +public: + operator bool() const + { + return bool(ss); + } + bool operator !() const + { + return !ss; + } + template + friend TStream& operator>>(TStream& s, RowReader& reader); + friend RowReader& operator>>(RowReader& reader, std::string& rhs); +public: + /** + * \brief + * \param delim Áзָô·û + */ + explicit RowReader(bool keepWhitespace = false, char delim = '\t') : ss(), delim(delim), keepWhitespace(keepWhitespace), _LineNumber(0) + { + } +}; + +// 
´ÓÊäÈëÁ÷ÖжÁÈëÒ»ÐзǿշÇ×¢ÊÍÐС£ +template +TStream& operator>>(TStream& s, RowReader& reader) +{ + std::string buffer{}; + while (getline(s, buffer)) + { + reader._LineNumber++; + // ¼ì²é´ËÐÐÊÇ·ñΪעÊÍ¡£ + // status + // 0 start/×ó²à¿Õ°× + // 1 # + // 2 ÆäËû×Ö·û + char status = 0; + for (auto& c : buffer) + { + switch (status) + { + case 0: + if (c == '#') + { + status = 1; + goto CHECK_STATUS; + } + if (c < 0 || !isspace(c)) + { + status = 2; + goto CHECK_STATUS; + } + break; + default: + assert(false); + break; + } + } + CHECK_STATUS: + switch (status) + { + case 0: + // ¿Õ°×ÐÐ + break; + case 1: + // ×¢ÊÍÐÐ + break; + case 2: + goto SET_RESULT; + default: + assert(false); + break; + } + } +SET_RESULT: + reader.ss.str(buffer); + reader.ss.clear(); + return s; +} + +RowReader& operator>>(RowReader& reader, std::string& rhs); + +RowReader& operator>>(RowReader& reader, int& rhs); + +RowReader& operator>>(RowReader& reader, long& rhs); + +RowReader& operator>>(RowReader& reader, float& rhs); + +RowReader& operator>>(RowReader& reader, double& rhs); + +RowReader& operator>>(RowReader& reader, bool& rhs); + +class ConfigurationParser +{ +private: + std::unordered_map entries; + void Load(std::istream& inputStream); +public: + std::string GetString(const std::string& key, const std::string& defaultValue) const; + int GetInt(const std::string& key, int defaultValue) const; + double GetDouble(const std::string& key, double defaultValue) const; + bool GetBool(const std::string& key, bool defaultValue) const; +public: + ConfigurationParser(std::istream& inputStream); + ConfigurationParser(std::filesystem::path filePath); +}; \ No newline at end of file diff --git a/test/cpp_test_repo/B/TypeTraits.h b/test/cpp_test_repo/B/TypeTraits.h new file mode 100644 index 00000000000..3169ac711d7 --- /dev/null +++ b/test/cpp_test_repo/B/TypeTraits.h @@ -0,0 +1,9 @@ +#pragma once +#include + +template +struct is_flags : std::false_type +{ +}; + +template constexpr bool is_flags_v = 
is_flags::value; diff --git a/test/cpp_test_repo/B/Utility-1.cpp b/test/cpp_test_repo/B/Utility-1.cpp new file mode 100644 index 00000000000..e043e34a718 --- /dev/null +++ b/test/cpp_test_repo/B/Utility-1.cpp @@ -0,0 +1,76 @@ +#include "stdafx.h" +#include "Utility.h" + +using namespace std; + +#define _DECLARE_ENUM(TYPE, MEMBER) case TYPE::MEMBER : return #MEMBER; +#define _DECLARE_ENUM_DEFAULT(TYPE) default : return string(#TYPE) + "::" + to_string((long)v); + +bool Equal(const string& lhs, const string& rhs, StringComparison comparision) +{ + if (&lhs == &rhs) return true; + size_t pos1 = 0, pos2 = 0; + size_t pos1r = lhs.size(), pos2r = rhs.size(); + if ((comparision & StringComparison::IgnoreSurroudingWhiteSpaces) + == StringComparison::IgnoreSurroudingWhiteSpaces) + { + while (pos1 < lhs.size() && isspace(lhs[pos1])) pos1++; + while (pos2 < lhs.size() && isspace(lhs[pos2])) pos2++; + while (pos1 > 0 && isspace(lhs[pos1 - 1])) pos1--; + while (pos2 > 0 && isspace(lhs[pos2 - 1])) pos2--; + } + if (pos1r - pos1 != pos2r - pos2) return false; + auto ignoreCase = (comparision & StringComparison::IgnoureCase) == StringComparison::IgnoureCase; + while (pos1 < pos1r) + { + if (ignoreCase) + { + if (tolower(lhs[pos1]) != tolower(rhs[pos1])) return false; + } else + { + if (lhs[pos1] != rhs[pos1]) return false; + } + pos1++; + pos2++; + } + return true; +} + +bool Confirm(const std::string& prompt) +{ + cout << prompt << " (Y/N)> " << flush; + while (true) + { + string buffer; + getline(cin, buffer); + stringstream ss(buffer); + if (ss >> buffer) + { + transform(buffer.begin(), buffer.end(), buffer.begin(), [](char c) {return tolower(c); }); + if (buffer == "y" || buffer == "yes") return true; + if (buffer == "n" || buffer == "no") return false; + } + cout << "ÎÞЧµÄÊäÈë¡£> " << flush; + } +} + +void ReportException(const exception& ex, int level) +{ + if (level > 0) + { + cerr << "<-"; + for (int i = 0; i < level; i++) cerr << '-'; + cerr << ' '; + } + cerr << "[" << 
typeid(ex).name() << "] " << ex.what() << endl; + try { + rethrow_if_nested(ex); + } + catch (const exception& subEx) { + ReportException(subEx, level + 1); + } + catch (...) + { + cerr << "[Unknown Exception]" << endl; + } +} diff --git a/test/cpp_test_repo/B/Utility.h b/test/cpp_test_repo/B/Utility.h new file mode 100644 index 00000000000..f618f0d1804 --- /dev/null +++ b/test/cpp_test_repo/B/Utility.h @@ -0,0 +1,187 @@ +#pragma once + +#include +#include "Exceptions.h" +#include +#include "TypeTraits.h" + +#define ANSI_COLOR_RED "\x1b[31m" +#define ANSI_COLOR_GREEN "\x1b[32m" +#define ANSI_COLOR_YELLOW "\x1b[33m" +#define ANSI_COLOR_BLUE "\x1b[34m" +#define ANSI_COLOR_MAGENTA "\x1b[35m" +#define ANSI_COLOR_CYAN "\x1b[36m" + +#define ANSI_COLOR_BRIGHT "\x1b[1m" +#define ANSI_COLOR_RESET "\x1b[0m" + +namespace std { + class type_index; +} + +template +bool dynamic_kind_of(const TSrc* obj) +{ + return dynamic_cast(obj) != nullptr; +} + +template +bool pointer_kind_of(const std::shared_ptr obj) +{ + return std::dynamic_pointer_cast(obj) != nullptr; +} + +template +TDest safe_cast(TSrc obj) +{ + if (obj == nullptr) return nullptr; + auto p = dynamic_cast(obj); + if (p == nullptr) throw InvalidCastException("ָ��������ʱָ������ת������Ч�ġ�"); + return p; +} + +template +std::shared_ptr safe_pointer_cast(const std::shared_ptr& obj) +{ + if (obj == nullptr) return std::shared_ptr(); + auto p = std::dynamic_pointer_cast(obj); + if (p == nullptr) throw InvalidCastException("ָ��������ʱָ������ת������Ч�ġ�"); + return p; +} + +template +std::string StreamStatusToString(const TStream& stream) +{ + std::string status = stream.good() ? 
"good " : ""; + if (stream.eof()) status += "eof "; + if (stream.bad()) status += "bad "; + if (stream.fail()) status += "fail "; + return status; +} + +template +TStream OpenAndValidate(const TPath arg1) +{ + auto fs = TStream(arg1); + if (!fs) { + std::stringstream ss; + ss << "���Դ��ļ�" << arg1 << "ʱ��������" << StreamStatusToString(fs); + throw Exception(ss.str()); + } + return fs; +} + +// �����״̬����ȷ�ԡ��������ȷ����������쳣�� +template +void ValidateStream(const TStream& stream) +{ + if (!stream) { + std::stringstream ss; + ss << "��״̬����" << StreamStatusToString(stream); + throw Exception(ss.str()); + } +} + +// ���ڽ����� map::equal_range �Ⱥ����ķ���ֵת��Ϊ�ɱ� foreach �﷨���ܵĽṹ�� +template +class _RangeToEnumerable +{ + std::pair _Range; +public: + TIterator begin() { return _Range.first; } + TIterator end() { return _Range.second; } + bool empty() { return _Range.first == _Range.second; } + _RangeToEnumerable(const std::pair range) + : _Range(range) + { + + } +}; + +template +_RangeToEnumerable RangeToEnumerable(const std::pair range) +{ + return _RangeToEnumerable(range); +} + +inline std::string to_string(const std::pair& value) +{ + return "[" + value.first + ", " + value.second + "]"; +} + +enum class StringComparison +{ + None = 0, + IgnoreSurroudingWhiteSpaces, + IgnoureCase, +}; + +template<> +struct is_flags : std::true_type +{ + +}; + +bool Equal(const std::string& lhs, const std::string& rhs, StringComparison comparision = StringComparison::None); + +// �������÷�Χö�ٵİ�λ���� +template, int> = 0> +TEnum operator & (TEnum lhs, TEnum rhs) +{ + using T = std::underlying_type_t; + return static_cast(static_cast(lhs) & static_cast(rhs)); +} + +template, int> = 0> +TEnum operator | (TEnum lhs, TEnum rhs) +{ + using T = std::underlying_type_t; + return static_cast(static_cast(lhs) | static_cast(rhs)); +} + +#define _RE_TRACE(iosExpr) //std::cout << "Trace:" << iosExpr << std::endl; + +bool Confirm(const std::string& prompt); + +struct ReliabilityNetworkEntry; 
+const char* FriendlyNameOf(const std::type_index& type); +const char* FriendlyNameOf(const type_info& type); +const char* FriendlyNameOf(const ReliabilityNetworkEntry& instance); +template +const char* FriendlyNameOf() +{ + return FriendlyNameOf(typeid(T)); +} + +// ��RAII�����ڵ��û������뿪ijһ�����ʱ���Զ�ִ��ijЩ�û�����������߼��� +// �÷��� +// ����Ҫ�����߼��Ĵ������ʹ�� +// BlockExitHandler cleanupHandler(....); +// ���ɡ� +// ע�⣺ +// ��Ҫ�������ͷ������������������ֶΡ� +// ��Ҫ�������Ͷ���Ϊ������������Ϊ�ᱻ�������Ż����� +class BlockExitHandler +{ + std::function handler; +public: + explicit BlockExitHandler(const std::function& handler) : handler(handler) + { + + } + BlockExitHandler(const BlockExitHandler&) = delete; + BlockExitHandler& operator=(const BlockExitHandler&) = delete; + ~BlockExitHandler() + { + try + { + handler(); + } catch (std::exception& e) + { + // �����������������쳣�� + std::cout << "BlockExitHandler: " << e.what() << std::endl; + } + } +}; + +void ReportException(const std::exception& ex, int level = 0); diff --git a/test/cpp_test_repo/B/main.cpp b/test/cpp_test_repo/B/main.cpp new file mode 100644 index 00000000000..6d9f1638733 --- /dev/null +++ b/test/cpp_test_repo/B/main.cpp @@ -0,0 +1,17 @@ +#include "stdafx.h" +#include "TextFileParsers.h" +#include "Utility.h" + +using namespace std; + +int main(int argc, char* argv[]) +{ + auto ifs = OpenAndValidate("config.txt"); + auto parser = ConfigurationParser(ifs); + cout << parser.GetBool("testBool", false) << endl; + cout << parser.GetDouble("textDouble", 1.23) << endl; + cout << parser.GetString("rawValue", "test") << endl; + exception testException("message"); + ReportException(testException); + return 0; +} diff --git a/test/cpp_test_repo/B/stdafx.cpp b/test/cpp_test_repo/B/stdafx.cpp new file mode 100644 index 00000000000..1681a386b77 --- /dev/null +++ b/test/cpp_test_repo/B/stdafx.cpp @@ -0,0 +1,7 @@ +// stdafx.cpp : Ö»°üÀ¨±ê×¼°üº¬ÎļþµÄÔ´Îļþ +// stdafx.obj ½«°üº¬Ô¤±àÒëÀàÐÍÐÅÏ¢ + +#include "stdafx.h" + 
+// TODO: ÔÚ STDAFX.H ÖÐÒýÓÃÈκÎËùÐèµÄ¸½¼ÓÍ·Îļþ£¬ +//¶ø²»ÊÇÔÚ´ËÎļþÖÐÒýÓà diff --git a/test/cpp_test_repo/B/stdafx.h b/test/cpp_test_repo/B/stdafx.h new file mode 100644 index 00000000000..733498297e0 --- /dev/null +++ b/test/cpp_test_repo/B/stdafx.h @@ -0,0 +1,39 @@ +// stdafx.h : ±ê׼ϵͳ°üº¬ÎļþµÄ°üº¬Îļþ£¬ +// »òÊǾ­³£Ê¹Óõ«²»³£¸ü¸ÄµÄ +// Ìض¨ÓÚÏîÄ¿µÄ°üº¬Îļþ +// + +#pragma once + +// TODO: ÔÚ´Ë´¦ÒýÓóÌÐòÐèÒªµÄÆäËûÍ·Îļþ +// ÊÊÓÃÓÚÈí²âʹÓà +//#define _DOWNGRADED_DEMO +#define _SILENCE_STDEXT_ALLOCATORS_DEPRECATION_WARNING + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// ½«ÊµÑéÐ﵀ filesystem ÃüÃû¿Õ¼äµ¼Èë std ÖС££¨2016£© +namespace std +{ + namespace filesystem = experimental::filesystem::v1; +} + + +using complexd = std::complex; \ No newline at end of file diff --git a/test/cpp_test_repo/C/CppProject1.vcxproj b/test/cpp_test_repo/C/CppProject1.vcxproj new file mode 100644 index 00000000000..5822b3b858e --- /dev/null +++ b/test/cpp_test_repo/C/CppProject1.vcxproj @@ -0,0 +1,133 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + + + + + + + + + + + + + + 15.0 + {1C29D994-E2FF-43C4-949C-103F4BF43E08} + CppProject1 + 10.0.17763.0 + + + + Application + true + v141 + MultiByte + + + Application + false + v141 + true + MultiByte + + + Application + true + v141 + MultiByte + + + Application + false + v141 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + + + Level3 + Disabled + true + true + + + + + Level3 + Disabled + true + true + + + + + Level3 + MaxSpeed + true + true + true + true + + + true + true + + + + + Level3 + MaxSpeed + true + true + true + true + + + true + true + + + + + + \ No newline at end of file diff --git a/test/cpp_test_repo/C/CppProject1.vcxproj.filters b/test/cpp_test_repo/C/CppProject1.vcxproj.filters new file mode 100644 index 00000000000..0fa98b2811d --- /dev/null 
+++ b/test/cpp_test_repo/C/CppProject1.vcxproj.filters @@ -0,0 +1,22 @@ + + + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/test/cpp_test_repo/C/Exceptions.h b/test/cpp_test_repo/C/Exceptions.h new file mode 100644 index 00000000000..7b73c769e97 --- /dev/null +++ b/test/cpp_test_repo/C/Exceptions.h @@ -0,0 +1,60 @@ +#pragma once + +#include +#include +#include + +// ????????????û???????????? +class Exception : public std::exception +{ + std::string msg; +public: + // ??????????????????????? + Exception() : msg("?????????????") {} + // ??????????????????????????? + explicit Exception(const std::string& message) : msg(message) + { +#if _DEBUG + std::cerr << "Exception constructed: " << message << std::endl; +#endif + } + ~Exception() noexcept override { } + const char* what() const noexcept override { return msg.c_str(); } +}; + +// ????????????????????????????????????????? +class InvalidCastException : public Exception +{ +public: + InvalidCastException(const std::string& message) : Exception(message) { } +}; + +// ????????????????????????? +class ArgumentException : public Exception +{ + static std::string BuildMessage(const std::string& message, const std::string& argumentName) + { + if (argumentName.empty()) return message; + return message + " ????????" + argumentName + "??"; + } + std::string _ArgumentName; +public: + const std::string& ArgumentName() const { return _ArgumentName; } +public: + ArgumentException(const std::string& message) + : Exception(message) { } + ArgumentException(const std::string& message, const std::string& argumentName) + : Exception(BuildMessage(message, argumentName)), _ArgumentName(argumentName) { } +}; + +// ????????????????????????????????????????? 
+class OperationFailureException : public Exception +{ +private: + int _ErrorCode; +public: + explicit OperationFailureException(int errorCode) + : Exception("???????????????" + std::to_string(errorCode) + "??"), _ErrorCode(errorCode) + { + } +}; diff --git a/test/cpp_test_repo/C/TextFileParsers.cpp b/test/cpp_test_repo/C/TextFileParsers.cpp new file mode 100644 index 00000000000..ba40fd92ad6 --- /dev/null +++ b/test/cpp_test_repo/C/TextFileParsers.cpp @@ -0,0 +1,160 @@ +#include "stdafx.h" +#include "TextFileParsers.h" + +using namespace std; +using namespace filesystem; + +RowReader& operator>>(RowReader& reader, string& rhs) +{ + // ʹÓÃÖƱí·û·Ö¸ô¡£ + getline(reader.ss, rhs, reader.delim); + // È¥³ý×óÓÒÁ½²àµÄ¿Õ°×¡£ + if (!reader.keepWhitespace) + { + // Left trim + auto wsEndsAt = find_if(rhs.begin(), rhs.end(), [](char c) {return c < 0 || !isspace(c); }); + rhs.erase(rhs.begin(), wsEndsAt); + // Right trim + auto wsStartsAt = find_if(rhs.rbegin(), rhs.rend(), [](char c) {return c < 0 || !isspace(c); }); + rhs.erase(rhs.rbegin().base(), rhs.end()); + } + return reader; +} + +RowReader& operator>>(RowReader& reader, int& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stoi(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, long& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stol(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, float& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stof(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, double& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stod(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, bool& rhs) +{ + string buffer{}; + if (reader >> buffer) + { + if (Equal(buffer, "true", StringComparison::IgnoreCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + rhs = true; + else if (Equal(buffer, "false", StringComparison::IgnoreCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + 
rhs = true; + else + { + try + { + auto value = stoi(buffer); + rhs = (value != 0); + } + catch (const exception&) + { + throw invalid_argument("Cannot convert to bool."); + } + } + } + return reader; +} + +void ConfigurationParser::Load(istream& inputStream) +{ + string buffer{}; + stringstream ss{}; + size_t lineNumber = 0; + while (getline(inputStream, buffer)) + { + lineNumber++; + ss.clear(); + ss.str(buffer); + string key{}; + char ch; + if (!(ss >> key)) continue; + if (key[0] == '#') continue; + if (!(ss >> ch) || ch != '=') + throw Exception("ÎÞЧµÄÅäÖÃÐС£ÆÚÍû£º¡°=¡±¡£ÐУº" + to_string(lineNumber) + "¡£"); + string value{}; + if (!(ss >> value)) + throw Exception("ÎÞЧµÄÅäÖÃÐС£ÆÚÍû£ºÅäÖÃÖµ¡£ÐУº" + to_string(lineNumber) + "¡£"); + // ISSUE Ä¿Ç°ÅäÖÃÖµÖв»ÄÜ°üº¬¿Õ¸ñ£¬·ñÔò»áÔÚ¿Õ¸ñ´¦½Ø¶Ï¡£ + entries[key] = value; + } +} + +std::string ConfigurationParser::GetString(const std::string& key, const std::string& defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + return v->second; +} + +int ConfigurationParser::GetInt(const std::string& key, int defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + try + { + return stoi(v->second); + } catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªint¡£")); + } +} + +double ConfigurationParser::GetDouble(const std::string& key, double defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + try + { + return stod(v->second); + } catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªdouble¡£")); + } +} + +bool ConfigurationParser::GetBool(const std::string& key, bool defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + if (Equal(v->second, "true", StringComparison::IgnoreCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + return true; + else if (Equal(v->second, 
"false", StringComparison::IgnoreCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + return false; + try + { + return stod(v->second); + } catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªbool¡£")); + } +} + +ConfigurationParser::ConfigurationParser(istream& inputStream) : entries() +{ + Load(inputStream); +} + +ConfigurationParser::ConfigurationParser(path filePath) : entries() +{ + auto ifs = OpenAndValidate(filePath); + Load(ifs); +} diff --git a/test/cpp_test_repo/C/TextFileParsers.h b/test/cpp_test_repo/C/TextFileParsers.h new file mode 100644 index 00000000000..73952b0deda --- /dev/null +++ b/test/cpp_test_repo/C/TextFileParsers.h @@ -0,0 +1,121 @@ +#pragma once +#include +#include "Utility.h" + +// ÓÃÓÚ´ÓÊäÈëÁ÷ÖÐÌáÈ¡Ò»ÐУ¬²¢Êä³ö¡£ +class RowReader +{ +private: + std::stringstream ss; + char delim; + bool keepWhitespace; + std::size_t _LineNumber; +public: + size_t LineNumber() const { return _LineNumber; } + void ResetLineNumber() { _LineNumber = 0; } + const std::stringstream& LineStream() const { return ss; } +public: + operator bool() const + { + return bool(ss); + } + bool operator !() const + { + return !ss; + } + template + friend TStream& operator>>(TStream& s, RowReader& reader); + friend RowReader& operator>>(RowReader& reader, std::string& rhs); +public: + /** + * \brief + * \param delim Áзָô·û + */ + explicit RowReader(bool keepWhitespace = false, char delim = '\t') : ss(), delim(delim), keepWhitespace(keepWhitespace), _LineNumber(0) + { + } +}; + +// ´ÓÊäÈëÁ÷ÖжÁÈëÒ»ÐзǿշÇ×¢ÊÍÐС£ +template +TStream& operator>>(TStream& s, RowReader& reader) +{ + std::string buffer{}; + while (getline(s, buffer)) + { + reader._LineNumber++; + // ¼ì²é´ËÐÐÊÇ·ñΪעÊÍ¡£ + // status + // 0 start/×ó²à¿Õ°× + // 1 # + // 2 ÆäËû×Ö·û + char status = 0; + for (auto& c : buffer) + { + switch (status) + { + case 0: + if (c == '#') + { + status = 1; + goto CHECK_STATUS; + } + if (c < 0 || !isspace(c)) + { + status = 2; + goto 
CHECK_STATUS; + } + break; + default: + assert(false); + break; + } + } + CHECK_STATUS: + switch (status) + { + case 0: + // ¿Õ°×ÐÐ + break; + case 1: + // ×¢ÊÍÐÐ + break; + case 2: + goto SET_RESULT; + default: + assert(false); + break; + } + } +SET_RESULT: + reader.ss.str(buffer); + reader.ss.clear(); + return s; +} + +RowReader& operator>>(RowReader& reader, std::string& rhs); + +RowReader& operator>>(RowReader& reader, int& rhs); + +RowReader& operator>>(RowReader& reader, long& rhs); + +RowReader& operator>>(RowReader& reader, float& rhs); + +RowReader& operator>>(RowReader& reader, double& rhs); + +RowReader& operator>>(RowReader& reader, bool& rhs); + +class ConfigurationParser +{ +private: + std::unordered_map entries; + void Load(std::istream& inputStream); +public: + std::string GetString(const std::string& key, const std::string& defaultValue) const; + int GetInt(const std::string& key, int defaultValue) const; + double GetDouble(const std::string& key, double defaultValue) const; + bool GetBool(const std::string& key, bool defaultValue) const; +public: + ConfigurationParser(std::istream& inputStream); + ConfigurationParser(std::filesystem::path filePath); +}; \ No newline at end of file diff --git a/test/cpp_test_repo/C/TypeTraits.h b/test/cpp_test_repo/C/TypeTraits.h new file mode 100644 index 00000000000..3169ac711d7 --- /dev/null +++ b/test/cpp_test_repo/C/TypeTraits.h @@ -0,0 +1,9 @@ +#pragma once +#include + +template +struct is_flags : std::false_type +{ +}; + +template constexpr bool is_flags_v = is_flags::value; diff --git a/test/cpp_test_repo/C/Utility.cpp b/test/cpp_test_repo/C/Utility.cpp new file mode 100644 index 00000000000..884b24e131e --- /dev/null +++ b/test/cpp_test_repo/C/Utility.cpp @@ -0,0 +1,76 @@ +#include "stdafx.h" +#include "Utility.h" + +using namespace std; + +#define _DECLARE_ENUM(TYPE, MEMBER) case TYPE::MEMBER : return #MEMBER; +#define _DECLARE_ENUM_DEFAULT(TYPE) default : return string(#TYPE) + "::" + to_string((long)v); 
+ +bool Equal(const string& lhs, const string& rhs, StringComparison comparision) +{ + if (&lhs == &rhs) return true; + size_t pos1 = 0, pos2 = 0; + size_t pos1r = lhs.size(), pos2r = rhs.size(); + if ((comparision & StringComparison::IgnoreSurroudingWhiteSpaces) + == StringComparison::IgnoreSurroudingWhiteSpaces) + { + while (pos1 < lhs.size() && isspace(lhs[pos1])) pos1++; + while (pos2 < lhs.size() && isspace(lhs[pos2])) pos2++; + while (pos1 > 0 && isspace(lhs[pos1 - 1])) pos1--; + while (pos2 > 0 && isspace(lhs[pos2 - 1])) pos2--; + } + if (pos1r - pos1 != pos2r - pos2) return false; + auto ignoreCase = (comparision & StringComparison::IgnoreCase) == StringComparison::IgnoreCase; + while (pos1 < pos1r) + { + if (ignoreCase) + { + if (tolower(lhs[pos1]) != tolower(rhs[pos1])) return false; + } else + { + if (lhs[pos1] != rhs[pos1]) return false; + } + pos1++; + pos2++; + } + return true; +} + +bool Confirm(const std::string& prompt) +{ + cout << prompt << " (Y/N)> " << flush; + while (true) + { + string buffer; + getline(cin, buffer); + stringstream ss(buffer); + if (ss >> buffer) + { + transform(buffer.begin(), buffer.end(), buffer.begin(), [](char c) {return tolower(c); }); + if (buffer == "y" || buffer == "yes") return true; + if (buffer == "n" || buffer == "no") return false; + } + cout << "ÎÞЧµÄÊäÈë¡£> " << flush; + } +} + +void ReportException(const exception& ex, int level) +{ + if (level > 0) + { + cerr << "<-"; + for (int i = 0; i < level; i++) cerr << '-'; + cerr << ' '; + } + cerr << "[" << typeid(ex).name() << "] " << ex.what() << endl; + try { + rethrow_if_nested(ex); + } + catch (const exception& subEx) { + ReportException(subEx, level + 1); + } + catch (...) 
+ { + cerr << "[Unknown Exception]" << endl; + } +} diff --git a/test/cpp_test_repo/C/Utility.h b/test/cpp_test_repo/C/Utility.h new file mode 100644 index 00000000000..dd4d698d427 --- /dev/null +++ b/test/cpp_test_repo/C/Utility.h @@ -0,0 +1,187 @@ +#pragma once + +#include +#include "Exceptions.h" +#include +#include "TypeTraits.h" + +#define ANSI_COLOR_RED "\x1b[31m" +#define ANSI_COLOR_GREEN "\x1b[32m" +#define ANSI_COLOR_YELLOW "\x1b[33m" +#define ANSI_COLOR_BLUE "\x1b[34m" +#define ANSI_COLOR_MAGENTA "\x1b[35m" +#define ANSI_COLOR_CYAN "\x1b[36m" + +#define ANSI_COLOR_BRIGHT "\x1b[1m" +#define ANSI_COLOR_RESET "\x1b[0m" + +namespace std { + class type_index; +} + +template +bool dynamic_kind_of(const TSrc* obj) +{ + return dynamic_cast(obj) != nullptr; +} + +template +bool pointer_kind_of(const std::shared_ptr obj) +{ + return std::dynamic_pointer_cast(obj) != nullptr; +} + +template +TDest safe_cast(TSrc obj) +{ + if (obj == nullptr) return nullptr; + auto p = dynamic_cast(obj); + if (p == nullptr) throw InvalidCastException("ָ��������ʱָ������ת������Ч�ġ�"); + return p; +} + +template +std::shared_ptr safe_pointer_cast(const std::shared_ptr& obj) +{ + if (obj == nullptr) return std::shared_ptr(); + auto p = std::dynamic_pointer_cast(obj); + if (p == nullptr) throw InvalidCastException("ָ��������ʱָ������ת������Ч�ġ�"); + return p; +} + +template +std::string StreamStatusToString(const TStream& stream) +{ + std::string status = stream.good() ? 
"good " : ""; + if (stream.eof()) status += "eof "; + if (stream.bad()) status += "bad "; + if (stream.fail()) status += "fail "; + return status; +} + +template +TStream OpenAndValidate(const TPath arg1) +{ + auto fs = TStream(arg1); + if (!fs) { + std::stringstream ss; + ss << "���Դ��ļ�" << arg1 << "ʱ��������" << StreamStatusToString(fs); + throw Exception(ss.str()); + } + return fs; +} + +// �����״̬����ȷ�ԡ��������ȷ����������쳣�� +template +void ValidateStream(const TStream& stream) +{ + if (!stream) { + std::stringstream ss; + ss << "��״̬����" << StreamStatusToString(stream); + throw Exception(ss.str()); + } +} + +// ���ڽ����� map::equal_range �Ⱥ����ķ���ֵת��Ϊ�ɱ� foreach �﷨���ܵĽṹ�� +template +class _RangeToEnumerable +{ + std::pair _Range; +public: + TIterator begin() { return _Range.first; } + TIterator end() { return _Range.second; } + bool empty() { return _Range.first == _Range.second; } + _RangeToEnumerable(const std::pair range) + : _Range(range) + { + + } +}; + +template +_RangeToEnumerable RangeToEnumerable(const std::pair range) +{ + return _RangeToEnumerable(range); +} + +inline std::string to_string(const std::pair& value) +{ + return "[" + value.first + ", " + value.second + "]"; +} + +enum class StringComparison +{ + None = 0, + IgnoreSurroudingWhiteSpaces, + IgnoreCase, +}; + +template<> +struct is_flags : std::true_type +{ + +}; + +bool Equal(const std::string& lhs, const std::string& rhs, StringComparison comparision = StringComparison::None); + +// �������÷�Χö�ٵİ�λ���� +template, int> = 0> +TEnum operator & (TEnum lhs, TEnum rhs) +{ + using T = std::underlying_type_t; + return static_cast(static_cast(lhs) & static_cast(rhs)); +} + +template, int> = 0> +TEnum operator | (TEnum lhs, TEnum rhs) +{ + using T = std::underlying_type_t; + return static_cast(static_cast(lhs) | static_cast(rhs)); +} + +#define _RE_TRACE(iosExpr) //std::cout << "Trace:" << iosExpr << std::endl; + +bool Confirm(const std::string& prompt); + +struct ReliabilityNetworkEntry; 
+const char* FriendlyNameOf(const std::type_index& type); +const char* FriendlyNameOf(const type_info& type); +const char* FriendlyNameOf(const ReliabilityNetworkEntry& instance); +template +const char* FriendlyNameOf() +{ + return FriendlyNameOf(typeid(T)); +} + +// ��RAII�����ڵ��û������뿪ijһ�����ʱ���Զ�ִ��ijЩ�û�����������߼��� +// �÷��� +// ����Ҫ�����߼��Ĵ������ʹ�� +// BlockExitHandler cleanupHandler(....); +// ���ɡ� +// ע�⣺ +// ��Ҫ�������ͷ������������������ֶΡ� +// ��Ҫ�������Ͷ���Ϊ������������Ϊ�ᱻ�������Ż����� +class BlockExitHandler +{ + std::function handler; +public: + explicit BlockExitHandler(const std::function& handler) : handler(handler) + { + + } + BlockExitHandler(const BlockExitHandler&) = delete; + BlockExitHandler& operator=(const BlockExitHandler&) = delete; + ~BlockExitHandler() + { + try + { + handler(); + } catch (std::exception& e) + { + // �����������������쳣�� + std::cout << "BlockExitHandler: " << e.what() << std::endl; + } + } +}; + +void ReportException(const std::exception& ex, int level = 0); diff --git a/test/cpp_test_repo/C/main.cpp b/test/cpp_test_repo/C/main.cpp new file mode 100644 index 00000000000..df9b79355fd --- /dev/null +++ b/test/cpp_test_repo/C/main.cpp @@ -0,0 +1,13 @@ +#include "stdafx.h" +#include "TextFileParsers.h" +#include "Utility.h" + +using namespace std; + +int main(int argc, char* argv[]) +{ + auto ifs = OpenAndValidate("config.txt"); + auto parser = ConfigurationParser(ifs); + cout << parser.GetBool("testBool", false) << endl; + return 0; +} diff --git a/test/cpp_test_repo/C/stdafx.cpp b/test/cpp_test_repo/C/stdafx.cpp new file mode 100644 index 00000000000..1681a386b77 --- /dev/null +++ b/test/cpp_test_repo/C/stdafx.cpp @@ -0,0 +1,7 @@ +// stdafx.cpp : Ö»°üÀ¨±ê×¼°üº¬ÎļþµÄÔ´Îļþ +// stdafx.obj ½«°üº¬Ô¤±àÒëÀàÐÍÐÅÏ¢ + +#include "stdafx.h" + +// TODO: ÔÚ STDAFX.H ÖÐÒýÓÃÈκÎËùÐèµÄ¸½¼ÓÍ·Îļþ£¬ +//¶ø²»ÊÇÔÚ´ËÎļþÖÐÒýÓà diff --git a/test/cpp_test_repo/C/stdafx.h b/test/cpp_test_repo/C/stdafx.h new file mode 100644 index 
00000000000..733498297e0 --- /dev/null +++ b/test/cpp_test_repo/C/stdafx.h @@ -0,0 +1,39 @@ +// stdafx.h : ±ê׼ϵͳ°üº¬ÎļþµÄ°üº¬Îļþ£¬ +// »òÊǾ­³£Ê¹Óõ«²»³£¸ü¸ÄµÄ +// Ìض¨ÓÚÏîÄ¿µÄ°üº¬Îļþ +// + +#pragma once + +// TODO: ÔÚ´Ë´¦ÒýÓóÌÐòÐèÒªµÄÆäËûÍ·Îļþ +// ÊÊÓÃÓÚÈí²âʹÓà +//#define _DOWNGRADED_DEMO +#define _SILENCE_STDEXT_ALLOCATORS_DEPRECATION_WARNING + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// ½«ÊµÑéÐ﵀ filesystem ÃüÃû¿Õ¼äµ¼Èë std ÖС££¨2016£© +namespace std +{ + namespace filesystem = experimental::filesystem::v1; +} + + +using complexd = std::complex; \ No newline at end of file diff --git a/test/cpp_test_repo/D/Exceptions.h b/test/cpp_test_repo/D/Exceptions.h new file mode 100644 index 00000000000..7b73c769e97 --- /dev/null +++ b/test/cpp_test_repo/D/Exceptions.h @@ -0,0 +1,60 @@ +#pragma once + +#include +#include +#include + +// ????????????û???????????? +class Exception : public std::exception +{ + std::string msg; +public: + // ??????????????????????? + Exception() : msg("?????????????") {} + // ??????????????????????????? + explicit Exception(const std::string& message) : msg(message) + { +#if _DEBUG + std::cerr << "Exception constructed: " << message << std::endl; +#endif + } + ~Exception() noexcept override { } + const char* what() const noexcept override { return msg.c_str(); } +}; + +// ????????????????????????????????????????? +class InvalidCastException : public Exception +{ +public: + InvalidCastException(const std::string& message) : Exception(message) { } +}; + +// ????????????????????????? +class ArgumentException : public Exception +{ + static std::string BuildMessage(const std::string& message, const std::string& argumentName) + { + if (argumentName.empty()) return message; + return message + " ????????" 
+ argumentName + "??"; + } + std::string _ArgumentName; +public: + const std::string& ArgumentName() const { return _ArgumentName; } +public: + ArgumentException(const std::string& message) + : Exception(message) { } + ArgumentException(const std::string& message, const std::string& argumentName) + : Exception(BuildMessage(message, argumentName)), _ArgumentName(argumentName) { } +}; + +// ????????????????????????????????????????? +class OperationFailureException : public Exception +{ +private: + int _ErrorCode; +public: + explicit OperationFailureException(int errorCode) + : Exception("???????????????" + std::to_string(errorCode) + "??"), _ErrorCode(errorCode) + { + } +}; diff --git a/test/cpp_test_repo/D/TextFileParsers.cpp b/test/cpp_test_repo/D/TextFileParsers.cpp new file mode 100644 index 00000000000..7eebc791e10 --- /dev/null +++ b/test/cpp_test_repo/D/TextFileParsers.cpp @@ -0,0 +1,160 @@ +#include "stdafx.h" +#include "TextFileParsers.h" + +using namespace std; +using namespace filesystem; + +RowReader& operator>>(RowReader& reader, string& rhs) +{ + // ʹÓÃÖƱí·û·Ö¸ô¡£ + getline(reader.ss, rhs, reader.delim); + // È¥³ý×óÓÒÁ½²àµÄ¿Õ°×¡£ + if (!reader.keepWhitespace) + { + // Left trim + auto wsEndsAt = find_if(rhs.begin(), rhs.end(), [](char c) {return c < 0 || !isspace(c); }); + rhs.erase(rhs.begin(), wsEndsAt); + // Right trim + auto wsStartsAt = find_if(rhs.rbegin(), rhs.rend(), [](char c) {return c < 0 || !isspace(c); }); + rhs.erase(rhs.rbegin().base(), rhs.end()); + } + return reader; +} + +RowReader& operator>>(RowReader& reader, int& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stoi(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, long& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stol(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, float& rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stof(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, double& 
rhs) +{ + string buffer{}; + if (reader >> buffer) rhs = stod(buffer); + return reader; +} + +RowReader& operator>>(RowReader& reader, bool& rhs) +{ + string buffer{}; + if (reader >> buffer) + { + if (Equal(buffer, "true", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + rhs = true; + else if (Equal(buffer, "false", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + rhs = true; + else + { + try + { + auto value = stoi(buffer); + rhs = (value != 0); + } + catch (const exception&) + { + throw invalid_argument("Cannot convert to bool."); + } + } + } + return reader; +} + +void ConfigurationParser::Load(istream& inputStream) +{ + string buffer{}; + stringstream ss{}; + size_t lineNumber = 0; + while (getline(inputStream, buffer)) + { + lineNumber++; + ss.clear(); + ss.str(buffer); + string key{}; + char ch; + if (!(ss >> key)) continue; + if (key[0] == '#') continue; + if (!(ss >> ch) || ch != '=') + throw Exception("ÎÞЧµÄÅäÖÃÐС£ÆÚÍû£º¡°=¡±¡£ÐУº" + to_string(lineNumber) + "¡£"); + string value{}; + if (!(ss >> value)) + throw Exception("ÎÞЧµÄÅäÖÃÐС£ÆÚÍû£ºÅäÖÃÖµ¡£ÐУº" + to_string(lineNumber) + "¡£"); + // ISSUE Ä¿Ç°ÅäÖÃÖµÖв»ÄÜ°üº¬¿Õ¸ñ£¬·ñÔò»áÔÚ¿Õ¸ñ´¦½Ø¶Ï¡£ + entries[key] = value; + } +} + +std::string ConfigurationParser::GetString(const std::string& key, const std::string& defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + return v->second; +} + +int ConfigurationParser::GetInt(const std::string& key, int defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + try + { + return stoi(v->second); + } catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªint¡£")); + } +} + +double ConfigurationParser::GetDouble(const std::string& key, double defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + try + { + return stod(v->second); + 
} catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªdouble¡£")); + } +} + +bool ConfigurationParser::GetBool(const std::string& key, bool defaultValue) const +{ + auto v = entries.find(key); + if (v == entries.end()) return defaultValue; + if (Equal(v->second, "true", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + return true; + else if (Equal(v->second, "false", StringComparison::IgnoureCase | StringComparison::IgnoreSurroudingWhiteSpaces)) + return false; + try + { + return stod(v->second); + } catch (const exception&) + { + throw_with_nested(Exception("ÎÞ·¨½«ÅäÖá°" + key + "¡±Öµ×ª»»Îªbool¡£")); + } +} + +ConfigurationParser::ConfigurationParser(istream& inputStream) : entries() +{ + Load(inputStream); +} + +ConfigurationParser::ConfigurationParser(path filePath) : entries() +{ + auto ifs = OpenAndValidate(filePath); + Load(ifs); +} diff --git a/test/cpp_test_repo/D/TextFileParsers.h b/test/cpp_test_repo/D/TextFileParsers.h new file mode 100644 index 00000000000..73952b0deda --- /dev/null +++ b/test/cpp_test_repo/D/TextFileParsers.h @@ -0,0 +1,121 @@ +#pragma once +#include +#include "Utility.h" + +// ÓÃÓÚ´ÓÊäÈëÁ÷ÖÐÌáÈ¡Ò»ÐУ¬²¢Êä³ö¡£ +class RowReader +{ +private: + std::stringstream ss; + char delim; + bool keepWhitespace; + std::size_t _LineNumber; +public: + size_t LineNumber() const { return _LineNumber; } + void ResetLineNumber() { _LineNumber = 0; } + const std::stringstream& LineStream() const { return ss; } +public: + operator bool() const + { + return bool(ss); + } + bool operator !() const + { + return !ss; + } + template + friend TStream& operator>>(TStream& s, RowReader& reader); + friend RowReader& operator>>(RowReader& reader, std::string& rhs); +public: + /** + * \brief + * \param delim Áзָô·û + */ + explicit RowReader(bool keepWhitespace = false, char delim = '\t') : ss(), delim(delim), keepWhitespace(keepWhitespace), _LineNumber(0) + { + } +}; + +// 
´ÓÊäÈëÁ÷ÖжÁÈëÒ»ÐзǿշÇ×¢ÊÍÐС£ +template +TStream& operator>>(TStream& s, RowReader& reader) +{ + std::string buffer{}; + while (getline(s, buffer)) + { + reader._LineNumber++; + // ¼ì²é´ËÐÐÊÇ·ñΪעÊÍ¡£ + // status + // 0 start/×ó²à¿Õ°× + // 1 # + // 2 ÆäËû×Ö·û + char status = 0; + for (auto& c : buffer) + { + switch (status) + { + case 0: + if (c == '#') + { + status = 1; + goto CHECK_STATUS; + } + if (c < 0 || !isspace(c)) + { + status = 2; + goto CHECK_STATUS; + } + break; + default: + assert(false); + break; + } + } + CHECK_STATUS: + switch (status) + { + case 0: + // ¿Õ°×ÐÐ + break; + case 1: + // ×¢ÊÍÐÐ + break; + case 2: + goto SET_RESULT; + default: + assert(false); + break; + } + } +SET_RESULT: + reader.ss.str(buffer); + reader.ss.clear(); + return s; +} + +RowReader& operator>>(RowReader& reader, std::string& rhs); + +RowReader& operator>>(RowReader& reader, int& rhs); + +RowReader& operator>>(RowReader& reader, long& rhs); + +RowReader& operator>>(RowReader& reader, float& rhs); + +RowReader& operator>>(RowReader& reader, double& rhs); + +RowReader& operator>>(RowReader& reader, bool& rhs); + +class ConfigurationParser +{ +private: + std::unordered_map entries; + void Load(std::istream& inputStream); +public: + std::string GetString(const std::string& key, const std::string& defaultValue) const; + int GetInt(const std::string& key, int defaultValue) const; + double GetDouble(const std::string& key, double defaultValue) const; + bool GetBool(const std::string& key, bool defaultValue) const; +public: + ConfigurationParser(std::istream& inputStream); + ConfigurationParser(std::filesystem::path filePath); +}; \ No newline at end of file diff --git a/test/cpp_test_repo/D/TypeTraits.h b/test/cpp_test_repo/D/TypeTraits.h new file mode 100644 index 00000000000..3169ac711d7 --- /dev/null +++ b/test/cpp_test_repo/D/TypeTraits.h @@ -0,0 +1,9 @@ +#pragma once +#include + +template +struct is_flags : std::false_type +{ +}; + +template constexpr bool is_flags_v = 
is_flags::value; diff --git a/test/cpp_test_repo/D/Utility.cpp b/test/cpp_test_repo/D/Utility.cpp new file mode 100644 index 00000000000..e043e34a718 --- /dev/null +++ b/test/cpp_test_repo/D/Utility.cpp @@ -0,0 +1,76 @@ +#include "stdafx.h" +#include "Utility.h" + +using namespace std; + +#define _DECLARE_ENUM(TYPE, MEMBER) case TYPE::MEMBER : return #MEMBER; +#define _DECLARE_ENUM_DEFAULT(TYPE) default : return string(#TYPE) + "::" + to_string((long)v); + +bool Equal(const string& lhs, const string& rhs, StringComparison comparision) +{ + if (&lhs == &rhs) return true; + size_t pos1 = 0, pos2 = 0; + size_t pos1r = lhs.size(), pos2r = rhs.size(); + if ((comparision & StringComparison::IgnoreSurroudingWhiteSpaces) + == StringComparison::IgnoreSurroudingWhiteSpaces) + { + while (pos1 < lhs.size() && isspace(lhs[pos1])) pos1++; + while (pos2 < lhs.size() && isspace(lhs[pos2])) pos2++; + while (pos1 > 0 && isspace(lhs[pos1 - 1])) pos1--; + while (pos2 > 0 && isspace(lhs[pos2 - 1])) pos2--; + } + if (pos1r - pos1 != pos2r - pos2) return false; + auto ignoreCase = (comparision & StringComparison::IgnoureCase) == StringComparison::IgnoureCase; + while (pos1 < pos1r) + { + if (ignoreCase) + { + if (tolower(lhs[pos1]) != tolower(rhs[pos1])) return false; + } else + { + if (lhs[pos1] != rhs[pos1]) return false; + } + pos1++; + pos2++; + } + return true; +} + +bool Confirm(const std::string& prompt) +{ + cout << prompt << " (Y/N)> " << flush; + while (true) + { + string buffer; + getline(cin, buffer); + stringstream ss(buffer); + if (ss >> buffer) + { + transform(buffer.begin(), buffer.end(), buffer.begin(), [](char c) {return tolower(c); }); + if (buffer == "y" || buffer == "yes") return true; + if (buffer == "n" || buffer == "no") return false; + } + cout << "ÎÞЧµÄÊäÈë¡£> " << flush; + } +} + +void ReportException(const exception& ex, int level) +{ + if (level > 0) + { + cerr << "<-"; + for (int i = 0; i < level; i++) cerr << '-'; + cerr << ' '; + } + cerr << "[" << 
typeid(ex).name() << "] " << ex.what() << endl; + try { + rethrow_if_nested(ex); + } + catch (const exception& subEx) { + ReportException(subEx, level + 1); + } + catch (...) + { + cerr << "[Unknown Exception]" << endl; + } +} diff --git a/test/cpp_test_repo/D/Utility.h b/test/cpp_test_repo/D/Utility.h new file mode 100644 index 00000000000..f618f0d1804 --- /dev/null +++ b/test/cpp_test_repo/D/Utility.h @@ -0,0 +1,187 @@ +#pragma once + +#include +#include "Exceptions.h" +#include +#include "TypeTraits.h" + +#define ANSI_COLOR_RED "\x1b[31m" +#define ANSI_COLOR_GREEN "\x1b[32m" +#define ANSI_COLOR_YELLOW "\x1b[33m" +#define ANSI_COLOR_BLUE "\x1b[34m" +#define ANSI_COLOR_MAGENTA "\x1b[35m" +#define ANSI_COLOR_CYAN "\x1b[36m" + +#define ANSI_COLOR_BRIGHT "\x1b[1m" +#define ANSI_COLOR_RESET "\x1b[0m" + +namespace std { + class type_index; +} + +template +bool dynamic_kind_of(const TSrc* obj) +{ + return dynamic_cast(obj) != nullptr; +} + +template +bool pointer_kind_of(const std::shared_ptr obj) +{ + return std::dynamic_pointer_cast(obj) != nullptr; +} + +template +TDest safe_cast(TSrc obj) +{ + if (obj == nullptr) return nullptr; + auto p = dynamic_cast(obj); + if (p == nullptr) throw InvalidCastException("ָ��������ʱָ������ת������Ч�ġ�"); + return p; +} + +template +std::shared_ptr safe_pointer_cast(const std::shared_ptr& obj) +{ + if (obj == nullptr) return std::shared_ptr(); + auto p = std::dynamic_pointer_cast(obj); + if (p == nullptr) throw InvalidCastException("ָ��������ʱָ������ת������Ч�ġ�"); + return p; +} + +template +std::string StreamStatusToString(const TStream& stream) +{ + std::string status = stream.good() ? 
"good " : ""; + if (stream.eof()) status += "eof "; + if (stream.bad()) status += "bad "; + if (stream.fail()) status += "fail "; + return status; +} + +template +TStream OpenAndValidate(const TPath arg1) +{ + auto fs = TStream(arg1); + if (!fs) { + std::stringstream ss; + ss << "���Դ��ļ�" << arg1 << "ʱ��������" << StreamStatusToString(fs); + throw Exception(ss.str()); + } + return fs; +} + +// �����״̬����ȷ�ԡ��������ȷ����������쳣�� +template +void ValidateStream(const TStream& stream) +{ + if (!stream) { + std::stringstream ss; + ss << "��״̬����" << StreamStatusToString(stream); + throw Exception(ss.str()); + } +} + +// ���ڽ����� map::equal_range �Ⱥ����ķ���ֵת��Ϊ�ɱ� foreach �﷨���ܵĽṹ�� +template +class _RangeToEnumerable +{ + std::pair _Range; +public: + TIterator begin() { return _Range.first; } + TIterator end() { return _Range.second; } + bool empty() { return _Range.first == _Range.second; } + _RangeToEnumerable(const std::pair range) + : _Range(range) + { + + } +}; + +template +_RangeToEnumerable RangeToEnumerable(const std::pair range) +{ + return _RangeToEnumerable(range); +} + +inline std::string to_string(const std::pair& value) +{ + return "[" + value.first + ", " + value.second + "]"; +} + +enum class StringComparison +{ + None = 0, + IgnoreSurroudingWhiteSpaces, + IgnoureCase, +}; + +template<> +struct is_flags : std::true_type +{ + +}; + +bool Equal(const std::string& lhs, const std::string& rhs, StringComparison comparision = StringComparison::None); + +// �������÷�Χö�ٵİ�λ���� +template, int> = 0> +TEnum operator & (TEnum lhs, TEnum rhs) +{ + using T = std::underlying_type_t; + return static_cast(static_cast(lhs) & static_cast(rhs)); +} + +template, int> = 0> +TEnum operator | (TEnum lhs, TEnum rhs) +{ + using T = std::underlying_type_t; + return static_cast(static_cast(lhs) | static_cast(rhs)); +} + +#define _RE_TRACE(iosExpr) //std::cout << "Trace:" << iosExpr << std::endl; + +bool Confirm(const std::string& prompt); + +struct ReliabilityNetworkEntry; 
+const char* FriendlyNameOf(const std::type_index& type); +const char* FriendlyNameOf(const type_info& type); +const char* FriendlyNameOf(const ReliabilityNetworkEntry& instance); +template +const char* FriendlyNameOf() +{ + return FriendlyNameOf(typeid(T)); +} + +// ��RAII�����ڵ��û������뿪ijһ�����ʱ���Զ�ִ��ijЩ�û�����������߼��� +// �÷��� +// ����Ҫ�����߼��Ĵ������ʹ�� +// BlockExitHandler cleanupHandler(....); +// ���ɡ� +// ע�⣺ +// ��Ҫ�������ͷ������������������ֶΡ� +// ��Ҫ�������Ͷ���Ϊ������������Ϊ�ᱻ�������Ż����� +class BlockExitHandler +{ + std::function handler; +public: + explicit BlockExitHandler(const std::function& handler) : handler(handler) + { + + } + BlockExitHandler(const BlockExitHandler&) = delete; + BlockExitHandler& operator=(const BlockExitHandler&) = delete; + ~BlockExitHandler() + { + try + { + handler(); + } catch (std::exception& e) + { + // �����������������쳣�� + std::cout << "BlockExitHandler: " << e.what() << std::endl; + } + } +}; + +void ReportException(const std::exception& ex, int level = 0); diff --git a/test/cpp_test_repo/D/main.cpp b/test/cpp_test_repo/D/main.cpp new file mode 100644 index 00000000000..f7da13a1987 --- /dev/null +++ b/test/cpp_test_repo/D/main.cpp @@ -0,0 +1,14 @@ +#include "stdafx.h" +#include "TextFileParsers.h" +#include "Utility.h" + +using namespace std; + +int main(int argc, char* argv[]) +{ + auto ifs = ifstream("config.txt"); + string line{}; + getline(ifs, line); + cout << line << endl; + return 0; +} diff --git a/test/cpp_test_repo/D/stdafx.cpp b/test/cpp_test_repo/D/stdafx.cpp new file mode 100644 index 00000000000..1681a386b77 --- /dev/null +++ b/test/cpp_test_repo/D/stdafx.cpp @@ -0,0 +1,7 @@ +// stdafx.cpp : Ö»°üÀ¨±ê×¼°üº¬ÎļþµÄÔ´Îļþ +// stdafx.obj ½«°üº¬Ô¤±àÒëÀàÐÍÐÅÏ¢ + +#include "stdafx.h" + +// TODO: ÔÚ STDAFX.H ÖÐÒýÓÃÈκÎËùÐèµÄ¸½¼ÓÍ·Îļþ£¬ +//¶ø²»ÊÇÔÚ´ËÎļþÖÐÒýÓà diff --git a/test/cpp_test_repo/D/stdafx.h b/test/cpp_test_repo/D/stdafx.h new file mode 100644 index 00000000000..733498297e0 --- /dev/null +++ 
b/test/cpp_test_repo/D/stdafx.h @@ -0,0 +1,39 @@ +// stdafx.h : ±ê׼ϵͳ°üº¬ÎļþµÄ°üº¬Îļþ£¬ +// »òÊǾ­³£Ê¹Óõ«²»³£¸ü¸ÄµÄ +// Ìض¨ÓÚÏîÄ¿µÄ°üº¬Îļþ +// + +#pragma once + +// TODO: ÔÚ´Ë´¦ÒýÓóÌÐòÐèÒªµÄÆäËûÍ·Îļþ +// ÊÊÓÃÓÚÈí²âʹÓà +//#define _DOWNGRADED_DEMO +#define _SILENCE_STDEXT_ALLOCATORS_DEPRECATION_WARNING + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// ½«ÊµÑéÐ﵀ filesystem ÃüÃû¿Õ¼äµ¼Èë std ÖС££¨2016£© +namespace std +{ + namespace filesystem = experimental::filesystem::v1; +} + + +using complexd = std::complex; \ No newline at end of file diff --git a/test/cpp_test_repo/cg.dot b/test/cpp_test_repo/cg.dot new file mode 100644 index 00000000000..42efa7add5e --- /dev/null +++ b/test/cpp_test_repo/cg.dot @@ -0,0 +1,3 @@ +digraph cpp_test_branch { + A -> B -> C -> D; +} diff --git a/test/pytest.ini b/test/pytest.ini new file mode 100644 index 00000000000..10805d1b1ee --- /dev/null +++ b/test/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +filterwarnings = + ignore:inspect\.getargspec\(\) is deprecated:DeprecationWarning diff --git a/test/test_analytics/baseline/cpp_test_repo/A.g.json b/test/test_analytics/baseline/cpp_test_repo/A.g.json new file mode 100644 index 00000000000..2594c7d3aa5 --- /dev/null +++ b/test/test_analytics/baseline/cpp_test_repo/A.g.json @@ -0,0 +1,854 @@ +{ + "edges": { + "ArgumentException::ArgumentException(const std::string &message, const std::string &argumentName)|->|static std::string ArgumentException::BuildMessage(const std::string &message, const std::string &argumentName)": { + "addedBy": "A", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)|->|TStream OpenAndValidate(const TPath arg1)": { + "addedBy": "A", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)|->|void ConfigurationParser::Load(std::istream &inputStream)": { + 
"addedBy": "A", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::istream &inputStream)|->|void ConfigurationParser::Load(std::istream &inputStream)": { + "addedBy": "A", + "weight": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)|->|bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "addedBy": "A", + "weight": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)|->|enum class StringComparison : int {}": { + "addedBy": "A", + "weight": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const|->|bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "addedBy": "A", + "weight": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const|->|enum class StringComparison : int {}": { + "addedBy": "A", + "weight": null + }, + "bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)|->|enum class StringComparison : int {}": { + "addedBy": "A", + "weight": null + }, + "int main(int argc, char *argv[])|->|TStream OpenAndValidate(const TPath arg1)": { + "addedBy": "A", + "weight": null + }, + "int main(int argc, char *argv[])|->|bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const": { + "addedBy": "A", + "weight": null + } + }, + "nodes": { + "ANSI_COLOR_BLUE": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_BRIGHT": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_CYAN": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_GREEN": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_MAGENTA": { + "files": [ + 
"Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_RED": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_RESET": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_YELLOW": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ArgumentException::ArgumentException(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 2, + "dels": 0 + } + }, + "size": null + }, + "ArgumentException::ArgumentException(const std::string &message, const std::string &argumentName)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 2, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler &BlockExitHandler::operator=(const BlockExitHandler &) = delete": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler::BlockExitHandler(const BlockExitHandler &) = delete": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler::~BlockExitHandler() noexcept": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 5, + "dels": 0 + } + }, + "size": null + }, + "ConfigurationParser::ConfigurationParser(std::istream &inputStream)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "Exception::Exception()": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + 
"Exception::~Exception() noexcept": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "InvalidCastException::InvalidCastException(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 24, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, double &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, float &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, int &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, long &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, std::string &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 16, + "dels": 0 + } + }, + "size": null + }, + "RowReader::operator bool() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "TDest safe_cast(TSrc obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TEnum operator&(TEnum lhs, TEnum rhs)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TEnum operator|(TEnum lhs, TEnum rhs)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + 
"dels": 0 + } + }, + "size": null + }, + "TIterator _RangeToEnumerable::begin()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TIterator _RangeToEnumerable::end()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TStream &operator>>(TStream &s, RowReader &reader)": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TStream OpenAndValidate(const TPath arg1)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_DECLARE_ENUM": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_DECLARE_ENUM_DEFAULT": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RE_TRACE": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RangeToEnumerable::_RangeToEnumerable(const std::pair range)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RangeToEnumerable RangeToEnumerable(const std::pair range)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_SILENCE_STDEXT_ALLOCATORS_DEPRECATION_WARNING": { + "files": [ + "stdafx.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 16, + "dels": 0 + } + }, + "size": null + }, + "bool Confirm(const std::string &prompt)": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 17, + "dels": 0 + } + }, + "size": null + }, + "bool Equal(const 
std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 29, + "dels": 0 + } + }, + "size": null + }, + "bool RowReader::operator!() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "bool _RangeToEnumerable::empty()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool dynamic_kind_of(const TSrc *obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool pointer_kind_of(const std::shared_ptr obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const char *Exception::what() const noexcept": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const char *FriendlyNameOf()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const std::string &ArgumentException::ArgumentName() const": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const std::stringstream &RowReader::LineStream() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "double ConfigurationParser::GetDouble(const std::string &key, double defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "enum class StringComparison : int {}": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "explicit BlockExitHandler::BlockExitHandler(const std::function &handler)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + 
"adds": 4, + "dels": 0 + } + }, + "size": null + }, + "explicit Exception::Exception(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "explicit OperationFailureException::OperationFailureException(int errorCode)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "explicit RowReader::RowReader(bool keepWhitespace, char delim)": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 3, + "dels": 0 + } + }, + "size": null + }, + "inline std::string to_string(const std::pair &value)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "int ConfigurationParser::GetInt(const std::string &key, int defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "int main(int argc, char *argv[])": { + "files": [ + "main.cpp" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + } + }, + "size": null + }, + "size_t RowReader::LineNumber() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "static std::string ArgumentException::BuildMessage(const std::string &message, const std::string &argumentName)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 5, + "dels": 0 + } + }, + "size": null + }, + "std::shared_ptr safe_pointer_cast(const std::shared_ptr &obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "std::string ConfigurationParser::GetString(const std::string &key, const std::string &defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "std::string StreamStatusToString(const TStream 
&stream)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "void ConfigurationParser::Load(std::istream &inputStream)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 23, + "dels": 0 + } + }, + "size": null + }, + "void ReportException(const std::exception &ex, int level)": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 20, + "dels": 0 + } + }, + "size": null + }, + "void RowReader::ResetLineNumber()": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "void ValidateStream(const TStream &stream)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/cpp_test_repo/B.g.json b/test/test_analytics/baseline/cpp_test_repo/B.g.json new file mode 100644 index 00000000000..d0db7b42971 --- /dev/null +++ b/test/test_analytics/baseline/cpp_test_repo/B.g.json @@ -0,0 +1,870 @@ +{ + "edges": { + "ArgumentException::ArgumentException(const std::string &message, const std::string &argumentName)|->|static std::string ArgumentException::BuildMessage(const std::string &message, const std::string &argumentName)": { + "addedBy": "A", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)|->|TStream OpenAndValidate(const TPath arg1)": { + "addedBy": "A", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)|->|void ConfigurationParser::Load(std::istream &inputStream)": { + "addedBy": "A", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::istream &inputStream)|->|void ConfigurationParser::Load(std::istream &inputStream)": { + "addedBy": "A", + "weight": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)|->|bool Equal(const std::string &lhs, const 
std::string &rhs, StringComparison comparision)": { + "addedBy": "A", + "weight": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)|->|enum class StringComparison : int {}": { + "addedBy": "A", + "weight": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const|->|bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "addedBy": "A", + "weight": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const|->|enum class StringComparison : int {}": { + "addedBy": "A", + "weight": null + }, + "bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)|->|enum class StringComparison : int {}": { + "addedBy": "B", + "weight": null + }, + "int main(int argc, char *argv[])|->|TStream OpenAndValidate(const TPath arg1)": { + "addedBy": "B", + "weight": null + }, + "int main(int argc, char *argv[])|->|bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const": { + "addedBy": "B", + "weight": null + }, + "int main(int argc, char *argv[])|->|double ConfigurationParser::GetDouble(const std::string &key, double defaultValue) const": { + "addedBy": "B", + "weight": null + }, + "int main(int argc, char *argv[])|->|std::string ConfigurationParser::GetString(const std::string &key, const std::string &defaultValue) const": { + "addedBy": "B", + "weight": null + }, + "int main(int argc, char *argv[])|->|void ReportException(const std::exception &ex, int level)": { + "addedBy": "B", + "weight": null + } + }, + "nodes": { + "ANSI_COLOR_BLUE": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_BRIGHT": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_CYAN": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": 
null + }, + "ANSI_COLOR_GREEN": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_MAGENTA": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_RED": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_RESET": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_YELLOW": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ArgumentException::ArgumentException(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 2, + "dels": 0 + } + }, + "size": null + }, + "ArgumentException::ArgumentException(const std::string &message, const std::string &argumentName)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 2, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler &BlockExitHandler::operator=(const BlockExitHandler &) = delete": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler::BlockExitHandler(const BlockExitHandler &) = delete": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler::~BlockExitHandler() noexcept": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 5, + "dels": 0 + } + }, + "size": null + }, + "ConfigurationParser::ConfigurationParser(std::istream &inputStream)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 
+ } + }, + "size": null + }, + "Exception::Exception()": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "Exception::~Exception() noexcept": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "InvalidCastException::InvalidCastException(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 24, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, double &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, float &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, int &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, long &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, std::string &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 16, + "dels": 0 + } + }, + "size": null + }, + "RowReader::operator bool() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "TDest safe_cast(TSrc obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TEnum operator&(TEnum lhs, TEnum rhs)": { + "files": [ + "Utility.h" + ], + "history": { + 
"A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TEnum operator|(TEnum lhs, TEnum rhs)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TIterator _RangeToEnumerable::begin()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TIterator _RangeToEnumerable::end()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TStream &operator>>(TStream &s, RowReader &reader)": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TStream OpenAndValidate(const TPath arg1)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_DECLARE_ENUM": { + "files": [ + "Utility-1.cpp" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_DECLARE_ENUM_DEFAULT": { + "files": [ + "Utility-1.cpp" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RE_TRACE": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RangeToEnumerable::_RangeToEnumerable(const std::pair range)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RangeToEnumerable RangeToEnumerable(const std::pair range)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_SILENCE_STDEXT_ALLOCATORS_DEPRECATION_WARNING": { + "files": [ + "stdafx.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 16, + "dels": 0 + } + }, + "size": null + }, + 
"bool Confirm(const std::string &prompt)": { + "files": [ + "Utility-1.cpp" + ], + "history": { + "A": { + "adds": 17, + "dels": 0 + } + }, + "size": null + }, + "bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "files": [ + "Utility-1.cpp" + ], + "history": { + "A": { + "adds": 29, + "dels": 0 + } + }, + "size": null + }, + "bool RowReader::operator!() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "bool _RangeToEnumerable::empty()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool dynamic_kind_of(const TSrc *obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool pointer_kind_of(const std::shared_ptr obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const char *Exception::what() const noexcept": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const char *FriendlyNameOf()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const std::string &ArgumentException::ArgumentName() const": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const std::stringstream &RowReader::LineStream() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "double ConfigurationParser::GetDouble(const std::string &key, double defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "enum class StringComparison : int {}": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 6, 
+ "dels": 0 + } + }, + "size": null + }, + "explicit BlockExitHandler::BlockExitHandler(const std::function &handler)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "explicit Exception::Exception(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "explicit OperationFailureException::OperationFailureException(int errorCode)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "explicit RowReader::RowReader(bool keepWhitespace, char delim)": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 3, + "dels": 0 + } + }, + "size": null + }, + "inline std::string to_string(const std::pair &value)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "int ConfigurationParser::GetInt(const std::string &key, int defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "int main(int argc, char *argv[])": { + "files": [ + "main.cpp" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "size_t RowReader::LineNumber() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "static std::string ArgumentException::BuildMessage(const std::string &message, const std::string &argumentName)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 5, + "dels": 0 + } + }, + "size": null + }, + "std::shared_ptr safe_pointer_cast(const std::shared_ptr &obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "std::string ConfigurationParser::GetString(const std::string 
&key, const std::string &defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "std::string StreamStatusToString(const TStream &stream)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "void ConfigurationParser::Load(std::istream &inputStream)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 23, + "dels": 0 + } + }, + "size": null + }, + "void ReportException(const std::exception &ex, int level)": { + "files": [ + "Utility-1.cpp" + ], + "history": { + "A": { + "adds": 20, + "dels": 0 + } + }, + "size": null + }, + "void RowReader::ResetLineNumber()": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "void ValidateStream(const TStream &stream)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/cpp_test_repo/C.g.json b/test/test_analytics/baseline/cpp_test_repo/C.g.json new file mode 100644 index 00000000000..e8db0ec0531 --- /dev/null +++ b/test/test_analytics/baseline/cpp_test_repo/C.g.json @@ -0,0 +1,890 @@ +{ + "edges": { + "ArgumentException::ArgumentException(const std::string &message, const std::string &argumentName)|->|static std::string ArgumentException::BuildMessage(const std::string &message, const std::string &argumentName)": { + "addedBy": "A", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)|->|TStream OpenAndValidate(const TPath arg1)": { + "addedBy": "C", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)|->|void ConfigurationParser::Load(std::istream &inputStream)": { + "addedBy": "C", + "weight": null + }, + 
"ConfigurationParser::ConfigurationParser(std::istream &inputStream)|->|void ConfigurationParser::Load(std::istream &inputStream)": { + "addedBy": "C", + "weight": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)|->|bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "addedBy": "C", + "weight": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)|->|enum class StringComparison : int {}": { + "addedBy": "C", + "weight": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const|->|bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "addedBy": "C", + "weight": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const|->|enum class StringComparison : int {}": { + "addedBy": "C", + "weight": null + }, + "bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)|->|enum class StringComparison : int {}": { + "addedBy": "C", + "weight": null + }, + "int main(int argc, char *argv[])|->|TStream OpenAndValidate(const TPath arg1)": { + "addedBy": "C", + "weight": null + }, + "int main(int argc, char *argv[])|->|bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const": { + "addedBy": "C", + "weight": null + }, + "int main(int argc, char *argv[])|->|double ConfigurationParser::GetDouble(const std::string &key, double defaultValue) const": { + "addedBy": "B", + "weight": null + }, + "int main(int argc, char *argv[])|->|std::string ConfigurationParser::GetString(const std::string &key, const std::string &defaultValue) const": { + "addedBy": "B", + "weight": null + }, + "int main(int argc, char *argv[])|->|void ReportException(const std::exception &ex, int level)": { + "addedBy": "B", + "weight": null + } + }, + "nodes": { + "ANSI_COLOR_BLUE": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + 
"size": null + }, + "ANSI_COLOR_BRIGHT": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_CYAN": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_GREEN": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_MAGENTA": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_RED": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_RESET": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_YELLOW": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ArgumentException::ArgumentException(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 2, + "dels": 0 + } + }, + "size": null + }, + "ArgumentException::ArgumentException(const std::string &message, const std::string &argumentName)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 2, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler &BlockExitHandler::operator=(const BlockExitHandler &) = delete": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler::BlockExitHandler(const BlockExitHandler &) = delete": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler::~BlockExitHandler() noexcept": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)": { + 
"files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 5, + "dels": 0 + } + }, + "size": null + }, + "ConfigurationParser::ConfigurationParser(std::istream &inputStream)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "Exception::Exception()": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "Exception::~Exception() noexcept": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "InvalidCastException::InvalidCastException(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 24, + "dels": 0 + }, + "C": { + "adds": 2, + "dels": 2 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, double &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, float &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, int &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, long &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, std::string &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 16, + "dels": 0 + } + }, + "size": null + }, + "RowReader::operator bool() const": { + "files": [ + "TextFileParsers.h" 
+ ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "TDest safe_cast(TSrc obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TEnum operator&(TEnum lhs, TEnum rhs)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TEnum operator|(TEnum lhs, TEnum rhs)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TIterator _RangeToEnumerable::begin()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TIterator _RangeToEnumerable::end()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TStream &operator>>(TStream &s, RowReader &reader)": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TStream OpenAndValidate(const TPath arg1)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_DECLARE_ENUM": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_DECLARE_ENUM_DEFAULT": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RE_TRACE": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RangeToEnumerable::_RangeToEnumerable(const std::pair range)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RangeToEnumerable RangeToEnumerable(const std::pair range)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_SILENCE_STDEXT_ALLOCATORS_DEPRECATION_WARNING": { + 
"files": [ + "stdafx.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 16, + "dels": 0 + }, + "C": { + "adds": 2, + "dels": 2 + } + }, + "size": null + }, + "bool Confirm(const std::string &prompt)": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 17, + "dels": 0 + } + }, + "size": null + }, + "bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 29, + "dels": 0 + }, + "C": { + "adds": 1, + "dels": 1 + } + }, + "size": null + }, + "bool RowReader::operator!() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "bool _RangeToEnumerable::empty()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool dynamic_kind_of(const TSrc *obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool pointer_kind_of(const std::shared_ptr obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const char *Exception::what() const noexcept": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const char *FriendlyNameOf()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const std::string &ArgumentException::ArgumentName() const": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const std::stringstream &RowReader::LineStream() const": { + "files": [ + "TextFileParsers.h" + ], + "history": 
{ + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "double ConfigurationParser::GetDouble(const std::string &key, double defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "enum class StringComparison : int {}": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + }, + "C": { + "adds": 1, + "dels": 1 + } + }, + "size": null + }, + "explicit BlockExitHandler::BlockExitHandler(const std::function &handler)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "explicit Exception::Exception(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "explicit OperationFailureException::OperationFailureException(int errorCode)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "explicit RowReader::RowReader(bool keepWhitespace, char delim)": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 3, + "dels": 0 + } + }, + "size": null + }, + "inline std::string to_string(const std::pair &value)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "int ConfigurationParser::GetInt(const std::string &key, int defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "int main(int argc, char *argv[])": { + "files": [ + "main.cpp" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 4, + "dels": 0 + }, + "C": { + "adds": 0, + "dels": 4 + } + }, + "size": null + }, + "size_t RowReader::LineNumber() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + 
"static std::string ArgumentException::BuildMessage(const std::string &message, const std::string &argumentName)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 5, + "dels": 0 + } + }, + "size": null + }, + "std::shared_ptr safe_pointer_cast(const std::shared_ptr &obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "std::string ConfigurationParser::GetString(const std::string &key, const std::string &defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "std::string StreamStatusToString(const TStream &stream)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "void ConfigurationParser::Load(std::istream &inputStream)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 23, + "dels": 0 + } + }, + "size": null + }, + "void ReportException(const std::exception &ex, int level)": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 20, + "dels": 0 + } + }, + "size": null + }, + "void RowReader::ResetLineNumber()": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "void ValidateStream(const TStream &stream)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/cpp_test_repo/D.g.json b/test/test_analytics/baseline/cpp_test_repo/D.g.json new file mode 100644 index 00000000000..e5eb44a4f6d --- /dev/null +++ b/test/test_analytics/baseline/cpp_test_repo/D.g.json @@ -0,0 +1,910 @@ +{ + "edges": { + "ArgumentException::ArgumentException(const std::string &message, const std::string &argumentName)|->|static std::string ArgumentException::BuildMessage(const std::string &message, const 
std::string &argumentName)": { + "addedBy": "A", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)|->|TStream OpenAndValidate(const TPath arg1)": { + "addedBy": "D", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)|->|void ConfigurationParser::Load(std::istream &inputStream)": { + "addedBy": "D", + "weight": null + }, + "ConfigurationParser::ConfigurationParser(std::istream &inputStream)|->|void ConfigurationParser::Load(std::istream &inputStream)": { + "addedBy": "D", + "weight": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)|->|bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "addedBy": "D", + "weight": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)|->|enum class StringComparison : int {}": { + "addedBy": "D", + "weight": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const|->|bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "addedBy": "D", + "weight": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const|->|enum class StringComparison : int {}": { + "addedBy": "D", + "weight": null + }, + "bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)|->|enum class StringComparison : int {}": { + "addedBy": "D", + "weight": null + }, + "int main(int argc, char *argv[])|->|TStream OpenAndValidate(const TPath arg1)": { + "addedBy": "C", + "weight": null + }, + "int main(int argc, char *argv[])|->|bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const": { + "addedBy": "C", + "weight": null + }, + "int main(int argc, char *argv[])|->|double ConfigurationParser::GetDouble(const std::string &key, double defaultValue) const": { + "addedBy": "B", + "weight": null + }, + "int main(int argc, char 
*argv[])|->|std::string ConfigurationParser::GetString(const std::string &key, const std::string &defaultValue) const": { + "addedBy": "B", + "weight": null + }, + "int main(int argc, char *argv[])|->|void ReportException(const std::exception &ex, int level)": { + "addedBy": "B", + "weight": null + } + }, + "nodes": { + "ANSI_COLOR_BLUE": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_BRIGHT": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_CYAN": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_GREEN": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_MAGENTA": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_RED": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_RESET": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ANSI_COLOR_YELLOW": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "ArgumentException::ArgumentException(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 2, + "dels": 0 + } + }, + "size": null + }, + "ArgumentException::ArgumentException(const std::string &message, const std::string &argumentName)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 2, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler &BlockExitHandler::operator=(const BlockExitHandler &) = delete": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + 
"BlockExitHandler::BlockExitHandler(const BlockExitHandler &) = delete": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "BlockExitHandler::~BlockExitHandler() noexcept": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "ConfigurationParser::ConfigurationParser(std::filesystem::path filePath)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 5, + "dels": 0 + } + }, + "size": null + }, + "ConfigurationParser::ConfigurationParser(std::istream &inputStream)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "Exception::Exception()": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "Exception::~Exception() noexcept": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "InvalidCastException::InvalidCastException(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, bool &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 24, + "dels": 0 + }, + "C": { + "adds": 2, + "dels": 2 + }, + "D": { + "adds": 2, + "dels": 2 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, double &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, float &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, int &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 
+ } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, long &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "RowReader &operator>>(RowReader &reader, std::string &rhs)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 16, + "dels": 0 + } + }, + "size": null + }, + "RowReader::operator bool() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "TDest safe_cast(TSrc obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TEnum operator&(TEnum lhs, TEnum rhs)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TEnum operator|(TEnum lhs, TEnum rhs)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TIterator _RangeToEnumerable::begin()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TIterator _RangeToEnumerable::end()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TStream &operator>>(TStream &s, RowReader &reader)": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "TStream OpenAndValidate(const TPath arg1)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_DECLARE_ENUM": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_DECLARE_ENUM_DEFAULT": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RE_TRACE": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + 
"adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RangeToEnumerable::_RangeToEnumerable(const std::pair range)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_RangeToEnumerable RangeToEnumerable(const std::pair range)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "_SILENCE_STDEXT_ALLOCATORS_DEPRECATION_WARNING": { + "files": [ + "stdafx.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool ConfigurationParser::GetBool(const std::string &key, bool defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 16, + "dels": 0 + }, + "C": { + "adds": 2, + "dels": 2 + }, + "D": { + "adds": 2, + "dels": 2 + } + }, + "size": null + }, + "bool Confirm(const std::string &prompt)": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 17, + "dels": 0 + } + }, + "size": null + }, + "bool Equal(const std::string &lhs, const std::string &rhs, StringComparison comparision)": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 29, + "dels": 0 + }, + "C": { + "adds": 1, + "dels": 1 + }, + "D": { + "adds": 1, + "dels": 1 + } + }, + "size": null + }, + "bool RowReader::operator!() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "bool _RangeToEnumerable::empty()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool dynamic_kind_of(const TSrc *obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "bool pointer_kind_of(const std::shared_ptr obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const char *Exception::what() const noexcept": { + "files": 
[ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const char *FriendlyNameOf()": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const std::string &ArgumentException::ArgumentName() const": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "const std::stringstream &RowReader::LineStream() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "double ConfigurationParser::GetDouble(const std::string &key, double defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "enum class StringComparison : int {}": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + }, + "C": { + "adds": 1, + "dels": 1 + }, + "D": { + "adds": 1, + "dels": 1 + } + }, + "size": null + }, + "explicit BlockExitHandler::BlockExitHandler(const std::function &handler)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "explicit Exception::Exception(const std::string &message)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "explicit OperationFailureException::OperationFailureException(int errorCode)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "explicit RowReader::RowReader(bool keepWhitespace, char delim)": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 3, + "dels": 0 + } + }, + "size": null + }, + "inline std::string to_string(const std::pair &value)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 4, + "dels": 0 + } + }, + "size": null + }, + "int 
ConfigurationParser::GetInt(const std::string &key, int defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "int main(int argc, char *argv[])": { + "files": [ + "main.cpp" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 4, + "dels": 0 + }, + "C": { + "adds": 0, + "dels": 4 + }, + "D": { + "adds": 4, + "dels": 3 + } + }, + "size": null + }, + "size_t RowReader::LineNumber() const": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "static std::string ArgumentException::BuildMessage(const std::string &message, const std::string &argumentName)": { + "files": [ + "Exceptions.h" + ], + "history": { + "A": { + "adds": 5, + "dels": 0 + } + }, + "size": null + }, + "std::shared_ptr safe_pointer_cast(const std::shared_ptr &obj)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "std::string ConfigurationParser::GetString(const std::string &key, const std::string &defaultValue) const": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "std::string StreamStatusToString(const TStream &stream)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "void ConfigurationParser::Load(std::istream &inputStream)": { + "files": [ + "TextFileParsers.cpp" + ], + "history": { + "A": { + "adds": 23, + "dels": 0 + } + }, + "size": null + }, + "void ReportException(const std::exception &ex, int level)": { + "files": [ + "Utility.cpp" + ], + "history": { + "A": { + "adds": 20, + "dels": 0 + } + }, + "size": null + }, + "void RowReader::ResetLineNumber()": { + "files": [ + "TextFileParsers.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + }, + "void ValidateStream(const 
TStream &stream)": { + "files": [ + "Utility.h" + ], + "history": { + "A": { + "adds": 1, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/A.g.json b/test/test_analytics/baseline/feature_branch/A.g.json new file mode 100644 index 00000000000..fd012a79514 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch/A.g.json @@ -0,0 +1,29 @@ +{ + "edges": {}, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/B.g.json b/test/test_analytics/baseline/feature_branch/B.g.json new file mode 100644 index 00000000000..a186e6d2226 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch/B.g.json @@ -0,0 +1,57 @@ +{ + "edges": {}, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/C.g.json b/test/test_analytics/baseline/feature_branch/C.g.json new file mode 100644 index 00000000000..9992cb0fd09 --- /dev/null +++ 
b/test/test_analytics/baseline/feature_branch/C.g.json @@ -0,0 +1,70 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "C", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "C", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/D.g.json b/test/test_analytics/baseline/feature_branch/D.g.json new file mode 100644 index 00000000000..488d426a937 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch/D.g.json @@ -0,0 +1,94 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, 
char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "G", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "G": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/E.g.json b/test/test_analytics/baseline/feature_branch/E.g.json new file mode 100644 index 00000000000..26ef09a68c0 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch/E.g.json @@ -0,0 +1,150 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + 
"addedBy": "G", + "weight": null + }, + "void insert(int num)|->|void add(int num)": { + "addedBy": "H", + "weight": null + }, + "void insert(int num)|->|void append(int num)": { + "addedBy": "H", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "G": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "void add(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 16, + "dels": 0 + }, + "I": { + "adds": 0, + "dels": 5 + } + }, + "size": null + }, + "void append(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 12, + "dels": 0 + }, + "I": { + "adds": 26, + "dels": 9 + } + }, + "size": null + }, + "void insert(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 25, + "dels": 0 + }, + "I": { + "adds": 0, + "dels": 25 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/F.g.json b/test/test_analytics/baseline/feature_branch/F.g.json new file mode 100644 index 00000000000..4c8ee3397cc --- /dev/null +++ b/test/test_analytics/baseline/feature_branch/F.g.json @@ -0,0 +1,178 @@ +{ + "edges": { + "char *str_append_chr(char 
*string, char append)|->|int count()": { + "addedBy": "F", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "F", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "F", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "F", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "F", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "F", + "weight": null + }, + "void insert(int num)|->|void add(int num)": { + "addedBy": "H", + "weight": null + }, + "void insert(int num)|->|void append(int num)": { + "addedBy": "H", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "G": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int count()": { + "files": [ + "feature-J.c" + ], + "history": { + "J": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": 
null + }, + "void add(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 16, + "dels": 0 + }, + "I": { + "adds": 0, + "dels": 5 + } + }, + "size": null + }, + "void append(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 12, + "dels": 0 + }, + "I": { + "adds": 26, + "dels": 9 + } + }, + "size": null + }, + "void display(struct node *r)": { + "files": [ + "feature-J.c" + ], + "history": { + "J": { + "adds": 14, + "dels": 0 + } + }, + "size": null + }, + "void insert(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 25, + "dels": 0 + }, + "I": { + "adds": 0, + "dels": 25 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/G.g.json b/test/test_analytics/baseline/feature_branch/G.g.json new file mode 100644 index 00000000000..e933b06f594 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch/G.g.json @@ -0,0 +1,94 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "C", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "C", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "G", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { 
+ "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "G": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/H.g.json b/test/test_analytics/baseline/feature_branch/H.g.json new file mode 100644 index 00000000000..6d72823868b --- /dev/null +++ b/test/test_analytics/baseline/feature_branch/H.g.json @@ -0,0 +1,138 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "G", + "weight": null + }, + "void insert(int num)|->|void add(int num)": { + "addedBy": "H", + "weight": null + }, + "void insert(int num)|->|void append(int num)": { + "addedBy": "H", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": 
{ + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "G": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "void add(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 16, + "dels": 0 + } + }, + "size": null + }, + "void append(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "void insert(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 25, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/I.g.json b/test/test_analytics/baseline/feature_branch/I.g.json new file mode 100644 index 00000000000..26ef09a68c0 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch/I.g.json @@ -0,0 +1,150 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char 
*eqaul2)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "G", + "weight": null + }, + "void insert(int num)|->|void add(int num)": { + "addedBy": "H", + "weight": null + }, + "void insert(int num)|->|void append(int num)": { + "addedBy": "H", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "G": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "void add(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 16, + "dels": 0 + }, + "I": { + "adds": 0, + "dels": 5 + } + }, + "size": null + }, + "void append(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 12, + "dels": 0 + }, + "I": { + "adds": 26, + "dels": 9 + } + }, + "size": null + }, + "void insert(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 25, + "dels": 0 + }, + "I": { + "adds": 0, + "dels": 25 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/J.g.json b/test/test_analytics/baseline/feature_branch/J.g.json new file mode 100644 index 
00000000000..ae29e94aeef --- /dev/null +++ b/test/test_analytics/baseline/feature_branch/J.g.json @@ -0,0 +1,174 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "G", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "G", + "weight": null + }, + "void insert(int num)|->|void add(int num)": { + "addedBy": "H", + "weight": null + }, + "void insert(int num)|->|void append(int num)": { + "addedBy": "H", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "G": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int count()": { + "files": [ + "feature-J.c" + ], + "history": { + "J": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + 
"A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "void add(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 16, + "dels": 0 + }, + "I": { + "adds": 0, + "dels": 5 + } + }, + "size": null + }, + "void append(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 12, + "dels": 0 + }, + "I": { + "adds": 26, + "dels": 9 + } + }, + "size": null + }, + "void display(struct node *r)": { + "files": [ + "feature-J.c" + ], + "history": { + "J": { + "adds": 14, + "dels": 0 + } + }, + "size": null + }, + "void insert(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 25, + "dels": 0 + }, + "I": { + "adds": 0, + "dels": 25 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch/K.g.json b/test/test_analytics/baseline/feature_branch/K.g.json new file mode 100644 index 00000000000..c406a16e2d7 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch/K.g.json @@ -0,0 +1,182 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int count()": { + "addedBy": "F", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "F", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "F", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "F", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "F", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "F", + "weight": null + }, + "void insert(int num)|->|void add(int num)": { + "addedBy": "H", + "weight": null + }, + "void insert(int 
num)|->|void append(int num)": { + "addedBy": "H", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "G": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int count()": { + "files": [ + "feature-K.c" + ], + "history": { + "J": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "void add(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 16, + "dels": 0 + }, + "I": { + "adds": 0, + "dels": 5 + } + }, + "size": null + }, + "void append(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 12, + "dels": 0 + }, + "I": { + "adds": 26, + "dels": 9 + } + }, + "size": null + }, + "void display(struct node *r)": { + "files": [ + "feature-K.c" + ], + "history": { + "J": { + "adds": 14, + "dels": 0 + }, + "K": { + "adds": 0, + "dels": 5 + } + }, + "size": null + }, + "void insert(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "H": { + "adds": 25, + "dels": 0 + }, + "I": { + "adds": 0, + "dels": 25 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch_first_parent/A.g.json 
b/test/test_analytics/baseline/feature_branch_first_parent/A.g.json new file mode 100644 index 00000000000..fd012a79514 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch_first_parent/A.g.json @@ -0,0 +1,29 @@ +{ + "edges": {}, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch_first_parent/B.g.json b/test/test_analytics/baseline/feature_branch_first_parent/B.g.json new file mode 100644 index 00000000000..a186e6d2226 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch_first_parent/B.g.json @@ -0,0 +1,57 @@ +{ + "edges": {}, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch_first_parent/C.g.json b/test/test_analytics/baseline/feature_branch_first_parent/C.g.json new file mode 100644 index 00000000000..9992cb0fd09 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch_first_parent/C.g.json @@ -0,0 +1,70 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int 
str_equals(char *equal1, char *eqaul2)": { + "addedBy": "C", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "C", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch_first_parent/D.g.json b/test/test_analytics/baseline/feature_branch_first_parent/D.g.json new file mode 100644 index 00000000000..a1579f2d346 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch_first_parent/D.g.json @@ -0,0 +1,94 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + } + }, + 
"nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "D": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch_first_parent/E.g.json b/test/test_analytics/baseline/feature_branch_first_parent/E.g.json new file mode 100644 index 00000000000..3c24ccfc482 --- /dev/null +++ b/test/test_analytics/baseline/feature_branch_first_parent/E.g.json @@ -0,0 +1,118 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + } + }, + "nodes": { + "char 
*str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "D": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "void add(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "E": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "void append(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "E": { + "adds": 29, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch_first_parent/F.g.json b/test/test_analytics/baseline/feature_branch_first_parent/F.g.json new file mode 100644 index 00000000000..1ea3182ab6d --- /dev/null +++ b/test/test_analytics/baseline/feature_branch_first_parent/F.g.json @@ -0,0 +1,142 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char 
*subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "D": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int count()": { + "files": [ + "feature-J.c" + ], + "history": { + "F": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "void add(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "E": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "void append(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "E": { + "adds": 29, + "dels": 0 + } + }, + "size": null + }, + "void display(struct node *r)": { + "files": [ + "feature-J.c" + ], + "history": { + "F": { + "adds": 14, + "dels": 0 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/baseline/feature_branch_first_parent/K.g.json b/test/test_analytics/baseline/feature_branch_first_parent/K.g.json new file mode 100644 index 00000000000..32ca12c74da --- /dev/null +++ 
b/test/test_analytics/baseline/feature_branch_first_parent/K.g.json @@ -0,0 +1,146 @@ +{ + "edges": { + "char *str_append_chr(char *string, char append)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_append_chr(char *string, char append)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|char *str_append_chr(char *string, char append)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_equals(char *equal1, char *eqaul2)": { + "addedBy": "D", + "weight": null + }, + "char *str_replace(char *search, char *replace, char *subject)|->|int str_len(char *string)": { + "addedBy": "D", + "weight": null + } + }, + "nodes": { + "char *str_append(char *string, char *append)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 7, + "dels": 0 + }, + "B": { + "adds": 0, + "dels": 3 + } + }, + "size": null + }, + "char *str_append_chr(char *string, char append)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 3, + "dels": 0 + }, + "C": { + "adds": 30, + "dels": 4 + } + }, + "size": null + }, + "char *str_replace(char *search, char *replace, char *subject)": { + "files": [ + "feature-G.c" + ], + "history": { + "D": { + "adds": 26, + "dels": 0 + } + }, + "size": null + }, + "int count()": { + "files": [ + "feature-K.c" + ], + "history": { + "F": { + "adds": 12, + "dels": 0 + } + }, + "size": null + }, + "int str_equals(char *equal1, char *eqaul2)": { + "files": [ + "main.c" + ], + "history": { + "B": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "int str_len(char *string)": { + "files": [ + "main.c" + ], + "history": { + "A": { + "adds": 6, + "dels": 0 + } + }, + "size": null + }, + "void add(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "E": { + "adds": 11, + "dels": 0 + } + }, + "size": null + }, + "void 
append(int num)": { + "files": [ + "feature-H.c" + ], + "history": { + "E": { + "adds": 29, + "dels": 0 + } + }, + "size": null + }, + "void display(struct node *r)": { + "files": [ + "feature-K.c" + ], + "history": { + "F": { + "adds": 14, + "dels": 0 + }, + "K": { + "adds": 0, + "dels": 5 + } + }, + "size": null + } + } +} \ No newline at end of file diff --git a/test/test_analytics/example5.patch b/test/test_analytics/example5.patch new file mode 100644 index 00000000000..c698ffad768 --- /dev/null +++ b/test/test_analytics/example5.patch @@ -0,0 +1,9 @@ +--- patch_error.c 2019-02-27 16:20:31.000000000 -0800 ++++ new_patch_error.c 2019-02-27 16:15:41.000000000 -0800 +@@ -9,4 +9,4 @@ + } + if(*eqaul1 == '\0' && *eqaul2 == '\0' ){return 0;} + else {return -1}; +-} ++} +\ No newline at end of file diff --git a/test/test_analytics/example6.patch b/test/test_analytics/example6.patch new file mode 100644 index 00000000000..c85e4882871 --- /dev/null +++ b/test/test_analytics/example6.patch @@ -0,0 +1,38 @@ +diff --git a/main.go b/main.go +index 5398e6d..2a81399 100644 +--- a/main.go ++++ b/main.go +@@ -9,22 +9,20 @@ type animal interface { + + type cat int + type dog int +- + func (c cat) printInfo(){ + fmt.Println("a cat") + } + +-func (d dog) printInfo(){ ++func (c dog) printInfo(){ + fmt.Println("a dog") + } +- ++func invoke(a animal){ ++ a.printInfo() ++} + func main() { +- var a animal + var c cat +- a=c +- a.printInfo() +- //other type +- var d dog +- a=d +- a.printInfo() +-} +\ No newline at end of file ++ var d dog ++ //as value convert ++ invoke(c) ++ invoke(d) ++} + diff --git a/test/test_analytics/test_analyzer.py b/test/test_analytics/test_analyzer.py index a5921bd81b7..fd011d463aa 100644 --- a/test/test_analytics/test_analyzer.py +++ b/test/test_analytics/test_analyzer.py @@ -25,39 +25,79 @@ def az(): return Analyzer(repo_path, CGraphServer(C_FILENAME_REGEXES)) -def test_az_basic(az): - az.analyze(from_beginning=True) +@pytest.mark.asyncio +async def 
test_analyzer_master_only(az): + await az.analyze(from_beginning=True) ccgraph = az.get_graph() history_truth = { - 'K': {'display': 5}, - 'F': {'display': 14, 'count': 12}, - 'E': {'append': 29, 'add': 11}, - 'D': {'str_replace': 26}, - 'C': {'str_append_chr': 34, 'str_equals': 1}, - # Commit 'B' adds function "str_append_chr" for 7 lines - # Is it thought to be 5 lines because of inperfect diff - 'B': {'str_append': 9, 'str_append_chr': 5, 'str_equals': 11}, - 'A': {'str_append': 7, 'str_len': 6}, + 'K': { + 'display': {'adds': 0, 'dels': 5} + }, + 'F': { + 'display': {'adds': 14, 'dels': 0}, + 'count': {'adds': 12, 'dels': 0} + }, + 'E': { + 'append': {'adds': 29, 'dels': 0}, + 'add': {'adds': 11, 'dels': 0} + }, + 'D': { + 'str_replace': {'adds': 26, 'dels': 0} + }, + # TODO: fix \No newline at the end of file + 'C': { + 'str_append_chr': {'adds': 30, 'dels': 4}, + 'str_equals': {'adds': 0, 'dels': 1} + }, + # Commit `B` is an example of imperfect diff, + # it removes `str_append` and adds a new function `str_append_chr` + # but because they are too similar, + # diff doesn't separate these changes into two chunks + # please see here: https://github.com/UltimateBeaver/test_feature_branch/commit/caaac10f604ea7ac759c2147df8fb2b588ee2a27 + 'B': { + 'str_append': {'adds': 6, 'dels': 3}, + 'str_append_chr': {'adds': 3, 'dels': 2}, + 'str_equals': {'adds': 11, 'dels': 0} + }, + 'A': { + 'str_append': {'adds': 7, 'dels': 0}, + 'str_len': {'adds': 6, 'dels': 0} + }, - # branch J from commit A, merge back through F - 'J': {'count': 12, 'display': 14}, + # # branch J from commit A, merge back through F + # 'J': { + # 'count': {'adds': 12, 'dels': 0}, + # 'display': {'adds': 14, 'dels': 0} + # }, - # branch G from commit B, merge back through D - 'G': {'str_equals': 1, 'str_replace': 26}, + # # TODO: fix \No newline at the end of file + # # branch G from commit B, merge back through D + # 'G': { + # 'str_equals': {'adds': 0, 'dels': 1}, + # 'str_replace': {'adds': 26, 
'dels': 0} + # }, - # branch H from commit D, merge back through E - 'I': {'add': 5, 'append': 35, 'insert': 25}, - 'H': {'add': 16, 'append': 12, 'insert': 25}, + # # branch H from commit D, merge back through E + # 'H': { + # 'add': {'adds': 16, 'dels': 0}, + # 'append': {'adds': 12, 'dels': 0}, + # 'insert': {'adds': 25, 'dels': 0} + # }, + # 'I': { + # 'add': {'adds': 0, 'dels': 5}, + # 'append': {'adds': 26, 'dels': 9}, + # 'insert': {'adds': 0, 'dels': 25} + # }, } commits = ccgraph.commits() for func, data in ccgraph.nodes(data=True): history = data['history'] - for cindex, csize in history.items(): - commit_message = commits[cindex]['message'] - assert(csize == history_truth[commit_message.strip()][func]) + for cid, chist in history.items(): + message = commits[cid]['message'] + assert chist == history_truth[message.strip()][func] edges_truth = [ # Edges existing in final snapshot @@ -80,4 +120,4 @@ def test_az_basic(az): ('str_append_chr', 'snprintf'), ('str_append', 'malloc') ] - assert(set(az._graph_server.get_graph().edges()) == set(edges_truth)) + assert set(az._graph_server.get_graph().edges()) == set(edges_truth) diff --git a/test/test_analytics/test_analyzer_go.py b/test/test_analytics/test_analyzer_go.py index a11d17e2e34..57f7782442f 100644 --- a/test/test_analytics/test_analyzer_go.py +++ b/test/test_analytics/test_analyzer_go.py @@ -73,6 +73,7 @@ def run_graph_server(graph_server_bin): def test_analzyer_go(az): +<<<<<<< HEAD graph_server_bin = build_graph_server() graph_server_proc = run_graph_server(graph_server_bin) @@ -130,3 +131,62 @@ def test_analzyer_go(az): finally: graph_server_proc.terminate() +======= + az._graph_server.reset_graph() + az.analyze() + ccgraph = az.get_graph() + + history_truth = { + 'D': {'Abs': 6, + 'funcA': 0, + 'main': 8, + "Absp": 3}, + 'C': {'Abs': 5, + 'funcA': 0, + 'funcB': 1, + 'main': 0}, + 'B': {'Abs': 3, + 'funcA': 0, + 'funcB': 3, + 'main': 5}, + 'A': {'Abs': 3, + 'funcA': 3, + 'main': 6} + } + + commits = 
ccgraph.commits() + for func, data in ccgraph.nodes(data=True): + history = data['history'] + for cindex, csize in history.items(): + commit_message = commits[int(cindex)]['message'] + assert csize == history_truth[commit_message.strip()][func] + + edges_added_by_A = set([ + ('Abs', 'Sqrt'), + ('funcA', 'Println'), + ('main', 'a'), + ('main', 'Println'), + ('main', 'Abs'), + ]) + + edges_added_by_B = set([ + ('Abs', 'funcA'), + ('funcB', 'Println'), + ('main', 'b'), + ('main', 'c'), + ]) + + edges_added_by_C = set([ + ('Abs', 'a'), + ('funcB', 'funcA') + ]) + + edges_added_by_D = set([ + ("Absp", "Sqrt"), + ("main", "Absp") + ]) + + print(set(az._graph_server.get_graph().edges())) + all_edges = edges_added_by_A.union(edges_added_by_B).union(edges_added_by_C).union(edges_added_by_D) + assert set(az._graph_server.get_graph().edges()) == all_edges +>>>>>>> c57baf152dc3a0b31a56a0487f789f54b9b43081 diff --git a/test/test_analytics/test_analyzer_go_1.py b/test/test_analytics/test_analyzer_go_1.py index 4f95cee7cd0..3386ba082c9 100644 --- a/test/test_analytics/test_analyzer_go_1.py +++ b/test/test_analytics/test_analyzer_go_1.py @@ -76,6 +76,7 @@ def run_graph_server(graph_server_bin): def test_analzyer_go(az): +<<<<<<< HEAD graph_server_bin = build_graph_server() graph_server_proc = run_graph_server(graph_server_bin) @@ -125,3 +126,44 @@ def test_analzyer_go(az): finally: graph_server_proc.terminate() +======= + az._graph_server.reset_graph() + az.analyze() + ccgraph = az.get_graph() + + history_truth = { + 'B': {'Abs': 3, + 'funcA': 0, + 'funcB': 3, + 'main': 5}, + 'A': {'Abs': 3, + 'funcA': 3, + 'main': 6} + } + + commits = ccgraph.commits() + for func, data in ccgraph.nodes(data=True): + history = data['history'] + for cindex, csize in history.items(): + commit_message = commits[int(cindex)]['message'] + assert csize == history_truth[commit_message.strip()][func] + + edges_added_by_A = set([ + ('Abs', 'Sqrt'), + ('funcA', 'Println'), + ('main', 'a'), + ('main', 
'Println'), + ('main', 'Abs'), + ]) + + edges_added_by_B = set([ + ('Abs', 'funcA'), + ('funcB', 'Println'), + ('main', 'b'), + ('main', 'c'), + ]) + + + all_edges = edges_added_by_A.union(edges_added_by_B) + assert set(az._graph_server.get_graph().edges()) == all_edges +>>>>>>> c57baf152dc3a0b31a56a0487f789f54b9b43081 diff --git a/test/test_analytics/test_analyzer_go_2.py b/test/test_analytics/test_analyzer_go_2.py index 47f6af3fb2f..2380973f337 100644 --- a/test/test_analytics/test_analyzer_go_2.py +++ b/test/test_analytics/test_analyzer_go_2.py @@ -75,6 +75,7 @@ def run_graph_server(graph_server_bin): def test_analzyer_go(az): +<<<<<<< HEAD graph_server_bin = build_graph_server() graph_server_proc = run_graph_server(graph_server_bin) @@ -210,3 +211,41 @@ def test_analzyer_go(az): finally: graph_server_proc.terminate() +======= + az._graph_server.reset_graph() + az.analyze() + ccgraph = az.get_graph() + + history_truth = { + 'A': {'printInfo': 0, + 'main': 10}, + 'B': {'printInfo': 1, + 'main': 8, + "invoke":3} + } + + commits = ccgraph.commits() + for func, data in ccgraph.nodes(data=True): + history = data['history'] + for cindex, csize in history.items(): + commit_message = commits[int(cindex)]['message'] + assert csize == history_truth[commit_message.strip()][func] + + edges_added_by_A = set([ + ('main', 'printInfo'), + ('printInfo', 'Println'), + ]) + + edges_added_by_B = set([ + ('invoke', 'printInfo'), + ('main', 'invoke'), + ]) + + edges_added_by_C = set([ + ('Abs', 'a'), + ('funcB', 'funcA') + ]) + + all_edges = edges_added_by_A.union(edges_added_by_B) + assert set(az._graph_server.get_graph().edges()) == all_edges +>>>>>>> c57baf152dc3a0b31a56a0487f789f54b9b43081 diff --git a/test/test_analytics/test_analyzer_js.py b/test/test_analytics/test_analyzer_js.py index bb8592d5673..616cb093413 100644 --- a/test/test_analytics/test_analyzer_js.py +++ b/test/test_analytics/test_analyzer_js.py @@ -67,7 +67,7 @@ def test_az(az: Analyzer): history = data['history'] 
for cindex, csize in history.items(): commit_message = commits[cindex]['message'] - assert(csize == history_truth[commit_message.strip()][func]) + assert csize == history_truth[commit_message.strip()][func] edges_truth = [ ('main.js:funcB:9:12', 'Native:Window_prototype_print'), @@ -77,7 +77,7 @@ def test_az(az: Analyzer): ('main.js:main:7:16', 'main.js:funcA:3:5'), ('main.js:global', 'main.js:main:7:16') ] - assert(set(az.graph_server.get_graph().edges()) == set(edges_truth)) + assert set(az.graph_server.get_graph().edges()) == set(edges_truth) finally: p.terminate() diff --git a/test/test_analytics/test_analyzer_lsp_ccls.py b/test/test_analytics/test_analyzer_lsp_ccls.py new file mode 100644 index 00000000000..33d17e06f32 --- /dev/null +++ b/test/test_analytics/test_analyzer_lsp_ccls.py @@ -0,0 +1,124 @@ +import json +import logging +import os +import pickle +import subprocess +from pathlib import Path +from tempfile import mkdtemp + +import networkx.readwrite.json_graph +import pytest +from git import Commit +from networkx import Graph +from networkx.algorithms.isomorphism import is_isomorphic + +from persper.analytics.analyzer2 import Analyzer +from persper.analytics.call_commit_graph import CallCommitGraph, CommitIdGenerators +from persper.analytics.lsp_graph_server.ccls import CclsGraphServer +from persper.util.path import root_path + +from .utility.graph_baseline import GraphDumpAnalyzerObserver + +# Whether we are generating graph dump baseline, rather than testing for regression. 
+IS_GENERATING_BASELINE = True + +_logger = logging.getLogger() + +testDataRoot = os.path.dirname(os.path.abspath(__file__)) + + +def prepareRepo(repoName: str): + # build the repo first if not exists yet + repo_path = os.path.join(root_path, 'repos/' + repoName) + script_path = os.path.join(root_path, 'tools/repo_creater/create_repo.py') + test_src_path = os.path.join(root_path, 'test/' + repoName) + if not os.path.isdir(repo_path): + cmd = '{} {}'.format(script_path, test_src_path) + subprocess.call(cmd, shell=True) + print("Repository path: ", repo_path) + return repo_path + + +def createCclsGraphServer(): + # create workspace root folder + CCLS_COMMAND = os.path.join(root_path, "bin/ccls") + DUMP_LOGS = False + workspaceRoot = mkdtemp() + print("Workspace root: ", workspaceRoot) + graphServer = CclsGraphServer(workspaceRoot, cacheRoot="./.ccls-cache", + languageServerCommand=CCLS_COMMAND + + (" -log-file=ccls.log" if DUMP_LOGS else ""), + dumpLogs=DUMP_LOGS, + graph=CallCommitGraph(commit_id_generator=CommitIdGenerators.fromComment)) + graphServer.reset_graph() + return graphServer + + +def createGraphDumpAnalyzerObserver(testName: str): + return GraphDumpAnalyzerObserver( + None if IS_GENERATING_BASELINE else + os.path.join(testDataRoot, "baseline/" + testName), + os.path.join(testDataRoot, "actualdump/" + testName), + dumpNaming=CommitIdGenerators.fromComment) + + +@pytest.mark.asyncio +async def testFeatureBranchFirstParent(): + """ + Tests test_feature_branch repos, only on topical branch. + """ + repoPath = prepareRepo("test_feature_branch") + graphServer = createCclsGraphServer() + analyzer = Analyzer(repoPath, graphServer, firstParentOnly=True) + async with graphServer: + analyzer.observer = createGraphDumpAnalyzerObserver( + "feature_branch_first_parent") + await analyzer.analyze() + + +@pytest.mark.asyncio +async def testFeatureBranch(): + """ + Tests test_feature_branch repos, on all branches. 
+ """ + repoPath = prepareRepo("test_feature_branch") + graphServer = createCclsGraphServer() + analyzer = Analyzer(repoPath, graphServer, firstParentOnly=False) + async with graphServer: + analyzer.observer = createGraphDumpAnalyzerObserver("feature_branch") + await analyzer.analyze() + + +@pytest.mark.asyncio +async def testCppTestRepo(): + repoPath = prepareRepo("cpp_test_repo") + graphServer = createCclsGraphServer() + analyzer = Analyzer(repoPath, graphServer) + async with graphServer: + analyzer.observer = createGraphDumpAnalyzerObserver("cpp_test_repo") + await analyzer.analyze() + + +@pytest.mark.asyncio +async def testAnalyzerWithPickle(): + repoPath = prepareRepo("test_feature_branch") + graphServer = createCclsGraphServer() + analyzer = Analyzer(repoPath, graphServer) + pickleContent = None + async with graphServer: + analyzer.observer = createGraphDumpAnalyzerObserver( + "analyzer_pickling") + assert len(analyzer.visitedCommits) == 0 + await analyzer.analyze(2) + assert len(analyzer.visitedCommits) == 2 + await analyzer.analyze(2) + assert len(analyzer.visitedCommits) == 4 + pickleContent = pickle.dumps(analyzer) + + analyzer1: Analyzer = pickle.loads(pickleContent) + # Perhaps we need to set another temp folder for this. 
+ graphServer1 = analyzer1.graphServer + analyzer1.observer = analyzer.observer + async with graphServer1: + assert analyzer1.visitedCommits == analyzer.visitedCommits + await analyzer1.analyze() diff --git a/test/test_analytics/test_call_commit_graph.py b/test/test_analytics/test_call_commit_graph.py index 7a49b6f3dac..aeae9cca858 100644 --- a/test/test_analytics/test_call_commit_graph.py +++ b/test/test_analytics/test_call_commit_graph.py @@ -1,4 +1,5 @@ import os +import pytest import shutil import subprocess from math import isclose @@ -23,18 +24,18 @@ def test_call_commit_graph(): first_commit['authorEmail'], first_commit['message']) ccgraph.add_node('f1') - ccgraph.update_node_history('f1', 10) + ccgraph.update_node_history('f1', 10, 0) ccgraph.add_node('f2') - ccgraph.update_node_history('f2', 10) + ccgraph.update_node_history('f2', 10, 0) ccgraph.add_edge('f1', 'f2') func_drs = ccgraph.function_devranks(0.85) commit_drs = ccgraph.commit_devranks(0.85) dev_drs = ccgraph.developer_devranks(0.85) - assert(isclose(func_drs['f1'], 0.35, rel_tol=1e-2)) - assert(isclose(func_drs['f2'], 0.65, rel_tol=1e-2)) - assert(isclose(commit_drs[first_commit['hexsha']], 1)) - assert(isclose(dev_drs[first_commit['authorEmail']], 1)) + assert isclose(func_drs['f1'], 0.35, rel_tol=1e-2) + assert isclose(func_drs['f2'], 0.65, rel_tol=1e-2) + assert isclose(commit_drs[first_commit['hexsha']], 1) + assert isclose(dev_drs[first_commit['authorEmail']], 1) second_commit = { 'hexsha': '0x02', @@ -47,19 +48,19 @@ def test_call_commit_graph(): second_commit['authorEmail'], second_commit['message']) ccgraph.add_node('f3') - ccgraph.update_node_history('f3', 10) + ccgraph.update_node_history('f3', 10, 0) ccgraph.add_edge('f1', 'f3') func_drs2 = ccgraph.function_devranks(0.85) commit_drs2 = ccgraph.commit_devranks(0.85) dev_drs2 = ccgraph.developer_devranks(0.85) - assert(isclose(func_drs2['f1'], 0.26, rel_tol=1e-2)) - assert(isclose(func_drs2['f2'], 0.37, rel_tol=1e-2)) - 
assert(isclose(func_drs2['f3'], 0.37, rel_tol=1e-2)) - assert(isclose(commit_drs2[first_commit['hexsha']], 0.63, rel_tol=1e-2)) - assert(isclose(commit_drs2[second_commit['hexsha']], 0.37, rel_tol=1e-2)) - assert(isclose(dev_drs2[first_commit['authorEmail']], 0.63, rel_tol=1e-2)) - assert(isclose(dev_drs2[second_commit['authorEmail']], 0.37, rel_tol=1e-2)) + assert isclose(func_drs2['f1'], 0.26, rel_tol=1e-2) + assert isclose(func_drs2['f2'], 0.37, rel_tol=1e-2) + assert isclose(func_drs2['f3'], 0.37, rel_tol=1e-2) + assert isclose(commit_drs2[first_commit['hexsha']], 0.63, rel_tol=1e-2) + assert isclose(commit_drs2[second_commit['hexsha']], 0.37, rel_tol=1e-2) + assert isclose(dev_drs2[first_commit['authorEmail']], 0.63, rel_tol=1e-2) + assert isclose(dev_drs2[second_commit['authorEmail']], 0.37, rel_tol=1e-2) third_commit = { 'hexsha': '0x03', @@ -72,34 +73,36 @@ def test_call_commit_graph(): third_commit['authorEmail'], third_commit['message']) ccgraph.add_node('f4') - ccgraph.update_node_history('f4', 10) + ccgraph.update_node_history('f4', 10, 0) ccgraph.add_edge('f2', 'f4') ccgraph.add_node('f5') - ccgraph.update_node_history('f5', 10) + ccgraph.update_node_history('f5', 10, 0) ccgraph.add_edge('f2', 'f5') func_drs3 = ccgraph.function_devranks(0.85) commit_drs3 = ccgraph.commit_devranks(0.85) dev_drs3 = ccgraph.developer_devranks(0.85) - assert(isclose(func_drs3['f1'], 0.141, rel_tol=1e-2)) - assert(isclose(func_drs3['f2'], 0.201, rel_tol=1e-2)) - assert(isclose(func_drs3['f3'], 0.201, rel_tol=1e-2)) - assert(isclose(func_drs3['f4'], 0.227, rel_tol=1e-2)) - assert(isclose(func_drs3['f5'], 0.227, rel_tol=1e-2)) - assert(isclose(commit_drs3[first_commit['hexsha']], 0.343, rel_tol=1e-2)) - assert(isclose(commit_drs3[second_commit['hexsha']], 0.201, rel_tol=1e-2)) - assert(isclose(commit_drs3[third_commit['hexsha']], 0.454, rel_tol=1e-2)) - assert(isclose(dev_drs3[first_commit['authorEmail']], 0.798, rel_tol=1e-2)) - 
assert(isclose(dev_drs3[second_commit['authorEmail']], 0.201, rel_tol=1e-2)) + assert isclose(func_drs3['f1'], 0.141, rel_tol=1e-2) + assert isclose(func_drs3['f2'], 0.201, rel_tol=1e-2) + assert isclose(func_drs3['f3'], 0.201, rel_tol=1e-2) + assert isclose(func_drs3['f4'], 0.227, rel_tol=1e-2) + assert isclose(func_drs3['f5'], 0.227, rel_tol=1e-2) + assert isclose(commit_drs3[first_commit['hexsha']], 0.343, rel_tol=1e-2) + assert isclose(commit_drs3[second_commit['hexsha']], 0.201, rel_tol=1e-2) + assert isclose(commit_drs3[third_commit['hexsha']], 0.454, rel_tol=1e-2) + assert isclose(dev_drs3[first_commit['authorEmail']], 0.798, rel_tol=1e-2) + assert isclose(dev_drs3[second_commit['authorEmail']], 0.201, rel_tol=1e-2) -def test_black_set(): +@pytest.mark.asyncio +async def test_black_set(): """ The CRLF commit: https://github.com/bitcoin/bitcoin/commit/0a61b0df1224a5470bcddab302bc199ca5a9e356 Its parent: https://github.com/bitcoin/bitcoin/commit/5b721607b1057df4dfe97f80d235ed372312f398 Its grandparent: https://github.com/bitcoin/bitcoin/commit/2ef9cfa5b81877b1023f2fcb82f5a638b1eb013c Its great grandparent: https://github.com/bitcoin/bitcoin/commit/7d7797b141dbd4ed9db1dda94684beb3395c2534 + Its great great grandparent: https://github.com/bitcoin/bitcoin/commit/401926283a200994ecd7df8eae8ced8e0b067c46 """ repo_path = os.path.join(root_path, 'repos/bitcoin') bitcoin_url = 'https://github.com/bitcoin/bitcoin' @@ -107,13 +110,14 @@ def test_black_set(): Repo.clone_from(bitcoin_url, repo_path) az = Analyzer(repo_path, CPPGraphServer(CPP_FILENAME_REGEXES)) crlf_sha = '0a61b0df1224a5470bcddab302bc199ca5a9e356' - ggparent_sha = '7d7797b141dbd4ed9db1dda94684beb3395c2534' - rev = ggparent_sha + '..' + crlf_sha - az.analyze(rev=rev) + parent_sha = '5b721607b1057df4dfe97f80d235ed372312f398' + gggparent_sha = '401926283a200994ecd7df8eae8ced8e0b067c46' + rev = gggparent_sha + '..' 
+ crlf_sha + await az.analyze(rev=rev) ccgraph = az.get_graph() devdict = ccgraph.commit_devranks(0.85) - devdict2 = ccgraph.commit_devranks(0.85, black_set=set([crlf_sha])) - assert(len(devdict) == 3) - assert(len(devdict2) == 2) - assert(crlf_sha in devdict) - assert(crlf_sha not in devdict2) + devdict2 = ccgraph.commit_devranks(0.85, black_set=set([parent_sha])) + assert len(devdict) == 3 + assert len(devdict2) == 2 + assert parent_sha in devdict + assert parent_sha not in devdict2 diff --git a/test/test_analytics/test_detect_change.py b/test/test_analytics/test_detect_change.py index b5f44694ff6..fc79d463ad5 100644 --- a/test/test_analytics/test_detect_change.py +++ b/test/test_analytics/test_detect_change.py @@ -2,7 +2,7 @@ from persper.analytics.patch_parser import PatchParser from persper.analytics.detect_change import get_changed_functions from persper.analytics.call_graph.cpp import get_func_ranges_cpp -from persper.analytics.srcml import transform_src_to_tree +from persper.analytics.srcml import src_to_tree dir_path = os.path.dirname(os.path.abspath(__file__)) @@ -37,38 +37,39 @@ def test_detect_change(): with open(os.path.join(dir_path, 'example.patch'), 'r') as f: example_patch = f.read() parsing_result = parser.parse(example_patch) - assert(parsing_result == parsing_truth) + assert parsing_result == parsing_truth with open(os.path.join(dir_path, 'example.cc'), 'r') as f: - root = transform_src_to_tree(f.read(), ext='.cc') + root = src_to_tree('example.cc', f.read()) func_ranges_result = get_func_ranges_cpp(root) - assert(func_ranges_result == func_ranges_truth) + assert func_ranges_result == func_ranges_truth + + assert changed_result == get_changed_functions( + *func_ranges_result, *parsing_result) - assert(changed_result == get_changed_functions( - *func_ranges_result, *parsing_result)) def test_patch_parser(): parser = PatchParser() patch2_truth = ( - [[0, 6]], + [[0, 6]], [] ) with open(os.path.join(dir_path, 'example2.patch'), 'r') as f: 
example2_patch = f.read() parsing_result = parser.parse(example2_patch) - assert(parsing_result == patch2_truth) + assert parsing_result == patch2_truth # view patch3_truth here # https://github.com/UltimateBeaver/test_feature_branch/commit/caaac10f604ea7ac759c2147df8fb2b588ee2a27 patch3_truth = ( [[10, 4], [12, 1], [14, 1], [17, 13]], - [[9, 10], [12, 12], [14, 14]] + [[9, 10], [12, 12], [14, 14]] ) with open(os.path.join(dir_path, 'example3.patch'), 'r') as f: example3_patch = f.read() parsing_result = parser.parse(example3_patch) - assert(parsing_result == patch3_truth) + assert parsing_result == patch3_truth # view patch4_truth here # https://github.com/UltimateBeaver/test_feature_branch/commit/364d5cc49aeb2e354da458924ce84c0ab731ac77 @@ -79,9 +80,24 @@ def test_patch_parser(): with open(os.path.join(dir_path, 'example4.patch'), 'r') as f: example4_patch = f.read() parsing_result = parser.parse(example4_patch) - assert(parsing_result == patch4_truth) - - - + assert parsing_result == patch4_truth +def test_no_newline_at_the_end_of_file(): + parser = PatchParser() + patch5_truth = ( + [[12, 1]], [[12, 12]] + ) + with open(os.path.join(dir_path, 'example5.patch'), 'r') as f: + example5_patch = f.read() + parsing_result = parser.parse(example5_patch) + assert parsing_result == patch5_truth + + patch6_truth = ( + [[17, 1], [20, 3], [30, 5]], + [[12, 12], [17, 17], [20, 20], [22, 22], [24, 30]] + ) + with open(os.path.join(dir_path, 'example6.patch'), 'r') as f: + example6_patch = f.read() + parsing_result = parser.parse(example6_patch) + assert parsing_result == patch6_truth diff --git a/test/test_analytics/test_devrank.py b/test/test_analytics/test_devrank.py index 4bbede1e7cb..5e0a29e6460 100644 --- a/test/test_analytics/test_devrank.py +++ b/test/test_analytics/test_devrank.py @@ -8,18 +8,18 @@ def test_devrank(): G.add_node(2, weight=10) G.add_edge(1, 2) G.add_edge(2, 1) - assert(devrank(G, 'weight') == {1: 0.5, 2: 0.5}) + assert devrank(G, 'weight') == {1: 0.5, 
2: 0.5} G2 = nx.DiGraph() G2.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 1)]) for u in G2: G2.node[u]['weight'] = 10 - assert(devrank(G2, 'weight') == {1: 0.25, 2: 0.25, 3: 0.25, 4: 0.25}) + assert devrank(G2, 'weight') == {1: 0.25, 2: 0.25, 3: 0.25, 4: 0.25} G3 = nx.DiGraph() G3.add_edge(1, 2) for u in G3: G3.node[u]['weight'] = 10 dr = devrank(G3, 'weight', alpha=1.0) - assert(abs(dr[1] - 0.3333) < 0.0001) - assert(abs(dr[2] - 0.6666) < 0.0001) + assert abs(dr[1] - 0.3333) < 0.0001 + assert abs(dr[2] - 0.6666) < 0.0001 diff --git a/test/test_analytics/test_diff.py b/test/test_analytics/test_diff.py new file mode 100644 index 00000000000..fc07cbb9708 --- /dev/null +++ b/test/test_analytics/test_diff.py @@ -0,0 +1,22 @@ +import os +from git import Repo +from persper.analytics.git_tools import diff_with_first_parent +from persper.util.path import root_path + + +def test_diff_ignore_space(): + """ + bitcoin project has a commit which only converts CRLF to LF + its diff with parent should be empty when + ignore space option is enabled + The CRLF commit: https://github.com/bitcoin/bitcoin/commit/0a61b0df1224a5470bcddab302bc199ca5a9e356 + """ + repo_path = os.path.join(root_path, "repos/bitcoin") + bitcoin_url = 'https://github.com/bitcoin/bitcoin' + if not os.path.exists(repo_path): + Repo.clone_from(bitcoin_url, repo_path) + r = Repo(repo_path) + crlf_sha = '0a61b0df1224a5470bcddab302bc199ca5a9e356' + crlf_commit = r.commit(crlf_sha) + diff_result = diff_with_first_parent(r, crlf_commit) + assert len(diff_result) == 0 diff --git a/test/test_analytics/test_inverse_diff.py b/test/test_analytics/test_inverse_diff.py index 883dd45ce6d..4b8da4cc1c2 100644 --- a/test/test_analytics/test_inverse_diff.py +++ b/test/test_analytics/test_inverse_diff.py @@ -15,4 +15,4 @@ def test_inverse_diff(): ) inv_result = inverse_diff(*adds_dels) - assert(inv_truth == inv_result) + assert inv_truth == inv_result diff --git a/test/test_analytics/test_iterator.py 
b/test/test_analytics/test_iterator.py index 79a576d6692..745b879c83e 100644 --- a/test/test_analytics/test_iterator.py +++ b/test/test_analytics/test_iterator.py @@ -29,37 +29,37 @@ def test_iterator(ri): commits, branch_commits = ri.iter(from_beginning=True, into_branches=True) # from A to L # use `git log --graph` to view ground truth - assert(len(ri.visited) == 12) - assert(len(commits) == 4) - assert(len(branch_commits) == 8) - assert(serialized_messages(commits) == 'D C B A') - assert(serialized_messages(branch_commits) == 'G F E J I H L K') + assert len(ri.visited) == 12 + assert len(commits) == 4 + assert len(branch_commits) == 8 + assert serialized_messages(commits) == 'D C B A' + assert serialized_messages(branch_commits) == 'G F E J I H L K' def test_continue_iter(ri): commits, branch_commits = ri.iter( from_beginning=True, num_commits=2, into_branches=True) - assert(serialized_messages(commits) == 'B A') - assert(serialized_messages(branch_commits) == '') + assert serialized_messages(commits) == 'B A' + assert serialized_messages(branch_commits) == '' commits2, branch_commits2 = ri.iter( continue_iter=True, num_commits=2, into_branches=True) - assert(serialized_messages(commits2) == 'D C') - assert(serialized_messages(branch_commits2) == 'G F E J I H L K') + assert serialized_messages(commits2) == 'D C' + assert serialized_messages(branch_commits2) == 'G F E J I H L K' def test_rev(ri): commits, branch_commits = ri.iter(rev='C', into_branches=True) - assert(serialized_messages(commits) == 'C B A') - assert(serialized_messages(branch_commits) == '') + assert serialized_messages(commits) == 'C B A' + assert serialized_messages(branch_commits) == '' commits2, branch_commits2 = ri.iter( continue_iter=True, end_commit_sha='D', into_branches=True) - assert(serialized_messages(commits2) == 'D') - assert(serialized_messages(branch_commits2) == 'G F E J I H L K') + assert serialized_messages(commits2) == 'D' + assert serialized_messages(branch_commits2) == 'G F 
E J I H L K' def test_iter_twice(ri): commits, branch_commits = ri.iter(from_beginning=True, into_branches=True) commits2, branch_commits2 = ri.iter( from_beginning=True, into_branches=True) - assert(commits == commits2) - assert(branch_commits == branch_commits2) + assert commits == commits2 + assert branch_commits == branch_commits2 diff --git a/test/test_analytics/test_score.py b/test/test_analytics/test_score.py new file mode 100644 index 00000000000..427d2fa4d44 --- /dev/null +++ b/test/test_analytics/test_score.py @@ -0,0 +1,39 @@ +from persper.analytics.score import commit_overall_scores + + +def test_commit_overall_scores(): + # sums up to 1 + commit_devranks = { + 'abcdefg': 0.2, + 'bcdefgh': 0.3, + 'cdefghi': 0.4, + 'defghij': 0.1, + } + + # suppose a commit can be one of two types + clf_results = { + 'abcdefg': [0.3, 0.7], + 'bcdefgh': [0.9, 0.1], + 'cdefghi': [0.2, 0.8], + 'defghij': [0.6, 0.4], + } + + # the first type is twice as valuable as the second type + label_weights = [2, 1] + + score_truth = { + 'abcdefg': 0.17687074829931967, + 'bcdefgh': 0.3877551020408163, + 'cdefghi': 0.326530612244898, + 'defghij': 0.108843537414966 + } + + top_one_score_truth = { + 'abcdefg': 0.14285714285714285, + 'bcdefgh': 0.4285714285714285, + 'cdefghi': 0.2857142857142857, + 'defghij': 0.14285714285714285 + } + + assert score_truth == commit_overall_scores(commit_devranks, clf_results, label_weights) + assert top_one_score_truth == commit_overall_scores(commit_devranks, clf_results, label_weights, top_one=True) diff --git a/test/test_analytics/test_srcml.py b/test/test_analytics/test_srcml.py new file mode 100644 index 00000000000..e04fde8a426 --- /dev/null +++ b/test/test_analytics/test_srcml.py @@ -0,0 +1,12 @@ +import os +from persper.analytics.srcml import src_to_tree +from persper.util.path import root_path + + +def test_src_to_tree(): + filename = 'example.cc' + full_path = os.path.join(root_path, 'test/test_analytics', filename) + with open(full_path, 'r') as 
f: + src = f.read() + root = src_to_tree(filename, src) + assert root.attrib['filename'] == filename diff --git a/test/test_analytics/utility/__init__.py b/test/test_analytics/utility/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test/test_analytics/utility/graph_baseline.py b/test/test_analytics/utility/graph_baseline.py new file mode 100644 index 00000000000..96527e86e8b --- /dev/null +++ b/test/test_analytics/utility/graph_baseline.py @@ -0,0 +1,135 @@ +""" +Utility functions for graph-dump-based regression tests. +""" +import json +import logging +import os +from enum import Enum +from pathlib import Path + +from git import Commit +from networkx import Graph + +from persper.analytics.analyzer2 import Analyzer, AnalyzerObserver +from persper.analytics.call_commit_graph import CallCommitGraph, CommitIdGenerators +from persper.analytics.graph_server import CommitSeekingMode + +_logger = logging.getLogger() + +testDataRoot = os.path.dirname(os.path.abspath(__file__)) + + +def formatEdgeId(u: str, v: str): + return u + "|->|" + v + + +def graphToDict(ccg: CallCommitGraph): + nodes = ccg.nodes(data=True) + for name, attr in nodes: + if "files" in attr: + files = list(attr["files"]) + files.sort() + attr["files"] = files + result = { + "nodes": dict(nodes), + "edges": dict(((formatEdgeId(u, v), data) for (u, v, data) in ccg.edges(data=True))) + } + return result + + +def fixGraphDict(graphData: dict): + if "nodes" in graphData: + for id, attr in graphData["nodes"].items(): + if "files" in attr: + attr["files"] = set(attr["files"]) + return graphData + + +def assertGraphMatches(baseline: dict, ccg: CallCommitGraph): + baselineNodeIds = set(baseline["nodes"].keys()) + for id, attr in ccg.nodes(data=True): + baselineAttr = baseline["nodes"].get(id, None) + assert baselineAttr != None, str.format("Extra node: {0}.", id) + assert baselineAttr == attr, str.format( + "Node attribute mismatch: {0}. 
Baseline: {1}; Actual: {2}.", id, baselineAttr, attr) + baselineNodeIds.remove(id) + assert not baselineNodeIds, str.format( + "Node(s) missing: %s.", baselineNodeIds) + baselineEdgeIds = set(baseline["edges"].keys()) + for u, v, attr in ccg.edges(data=True): + id = formatEdgeId(u, v) + baselineAttr = baseline["edges"].get(id, None) + assert baselineAttr != None, str.format("Extra branch: {0}.", id) + assert baselineAttr == attr, str.format( + "Branch attribute mismatch: {0}. Baseline: {1}; Actual: {2}.", id, baselineAttr, attr) + baselineEdgeIds.remove(id) + assert not baselineEdgeIds, str.format( + "Branch(es) missing: {0}.", baselineEdgeIds) + +class GraphDumpAnalyzerObserver(AnalyzerObserver): + """ + An implementation of AnalyzerObserver that generates graph dump after each commit, + and/or asserts the generated graph is the same as baseline graph dump. + """ + + def __init__(self, graphBaselineDumpPath: str = None, graphTestDumpPath: str = None, + dumpOnlyOnError: bool = None, dumpNaming = CommitIdGenerators.fromHexsha): + """ + Params: + graphBaselineDumpPath: root folder of the baseline graph dump files. Set to values other than `None` + to perform basline assertions after each commit. + graphTestDumpPath: root folder to persist graph dump of observed Analyzer after each commit. This is + also the root folder to dump current graph if baseline assertion fails in any commit. + dumpOnlyOnError: True: dump current graph in Analyzer only when baseline assertion fails. + False: dump current graph in Analyzer after each commit. + None: if graphBaselineDumpPath == None, same as True; otherwise, same as False. + dumpNaming: specify how to name the graph dump files. + Remarks: + Set `graphBaselineDumpPath` to `None` to generate graph dump files in the folder specified in `graphTestDumpPath`, + which can be used as `graphBaselineDumpPath` in the next run. 
+ """ + super().__init__() + if graphBaselineDumpPath: + self._baselinePath = Path(graphBaselineDumpPath).resolve() + else: + self._baselinePath = None + if graphTestDumpPath: + self._dumpPath = Path(graphTestDumpPath).resolve() + self._dumpPath.mkdir(parents=True, exist_ok=True) + else: + self._dumpPath = None + self._dumpOnlyOnError = graphBaselineDumpPath != None if dumpOnlyOnError == None else dumpOnlyOnError + self._dumpNaming = dumpNaming + + def onAfterCommit(self, analyzer: Analyzer, commit: Commit, seeking_mode: CommitSeekingMode): + if seeking_mode == CommitSeekingMode.Rewind: + return + graph: CallCommitGraph = analyzer.graph + graphDumpLocalName = self._dumpNaming(-1, commit.hexsha, commit.message) + ".g.json" + + def dumpGraph(warnIfNotAvailable: bool): + if not self._dumpPath: + if warnIfNotAvailable: + _logger.warning( + "Cannot dump call commit graph because no dump path has been specified. Commit %s: %s.", commit.hexsha, commit.message) + return False + data = graphToDict(graph) + graphPath = self._dumpPath.joinpath(graphDumpLocalName) + with open(graphPath, "wt") as f: + json.dump(data, f, sort_keys=True, indent=4) + return True + # check baseline for regression + if self._baselinePath: + try: + graphPath = self._baselinePath.joinpath(graphDumpLocalName) + baselineData: dict = None + with open(graphPath, "rt") as f: + baselineData = fixGraphDict(json.load(f)) + assertGraphMatches(baselineData, graph) + except: + _logger.error("Failed on commit %s: %s.", + commit.hexsha, commit.message) + dumpGraph(True) + raise + if not self._dumpOnlyOnError: + dumpGraph(False) diff --git a/test/test_feature_branch/C/main.c b/test/test_feature_branch/C/main.c index 401fb0597d6..5eaeabc97a7 100644 --- a/test/test_feature_branch/C/main.c +++ b/test/test_feature_branch/C/main.c @@ -8,6 +8,8 @@ int str_len(char *string) /* str_append is deleted in B */ +int str_equals(char *equal1, char *eqaul2); // Forward decl. 
+ /* added in B, edited in C */ char* str_append_chr(char* string, char append) { char* newstring = ""; diff --git a/test/test_feature_branch/D/main.c b/test/test_feature_branch/D/main.c index 401fb0597d6..c5a80c50643 100644 --- a/test/test_feature_branch/D/main.c +++ b/test/test_feature_branch/D/main.c @@ -8,6 +8,8 @@ int str_len(char *string) /* str_append is deleted in B */ +int str_equals(char *equal1, char *eqaul2); // Forward decl + /* added in B, edited in C */ char* str_append_chr(char* string, char append) { char* newstring = ""; diff --git a/test/test_feature_branch/E/main.c b/test/test_feature_branch/E/main.c index 401fb0597d6..c5a80c50643 100644 --- a/test/test_feature_branch/E/main.c +++ b/test/test_feature_branch/E/main.c @@ -8,6 +8,8 @@ int str_len(char *string) /* str_append is deleted in B */ +int str_equals(char *equal1, char *eqaul2); // Forward decl + /* added in B, edited in C */ char* str_append_chr(char* string, char append) { char* newstring = ""; diff --git a/test/test_feature_branch/F/main.c b/test/test_feature_branch/F/main.c index 401fb0597d6..c5a80c50643 100644 --- a/test/test_feature_branch/F/main.c +++ b/test/test_feature_branch/F/main.c @@ -8,6 +8,8 @@ int str_len(char *string) /* str_append is deleted in B */ +int str_equals(char *equal1, char *eqaul2); // Forward decl + /* added in B, edited in C */ char* str_append_chr(char* string, char append) { char* newstring = ""; diff --git a/test/test_feature_branch/G/main.c b/test/test_feature_branch/G/main.c index 19d8834f11f..67f1d720f3a 100644 --- a/test/test_feature_branch/G/main.c +++ b/test/test_feature_branch/G/main.c @@ -8,6 +8,8 @@ int str_len(char *string) /* str_append is deleted in B */ +int str_equals(char *equal1, char *eqaul2); // Forward decl + /* added in B*/ char* str_append_chr(char* string, char append) { char* newstring = NULL; diff --git a/test/test_feature_branch/H/main.c b/test/test_feature_branch/H/main.c index 401fb0597d6..c5a80c50643 100644 --- 
a/test/test_feature_branch/H/main.c +++ b/test/test_feature_branch/H/main.c @@ -8,6 +8,8 @@ int str_len(char *string) /* str_append is deleted in B */ +int str_equals(char *equal1, char *eqaul2); // Forward decl + /* added in B, edited in C */ char* str_append_chr(char* string, char append) { char* newstring = ""; diff --git a/test/test_feature_branch/I/main.c b/test/test_feature_branch/I/main.c index 401fb0597d6..c5a80c50643 100644 --- a/test/test_feature_branch/I/main.c +++ b/test/test_feature_branch/I/main.c @@ -8,6 +8,8 @@ int str_len(char *string) /* str_append is deleted in B */ +int str_equals(char *equal1, char *eqaul2); // Forward decl + /* added in B, edited in C */ char* str_append_chr(char* string, char append) { char* newstring = ""; diff --git a/test/test_feature_branch/K/main.c b/test/test_feature_branch/K/main.c index 401fb0597d6..c5a80c50643 100644 --- a/test/test_feature_branch/K/main.c +++ b/test/test_feature_branch/K/main.c @@ -8,6 +8,8 @@ int str_len(char *string) /* str_append is deleted in B */ +int str_equals(char *equal1, char *eqaul2); // Forward decl + /* added in B, edited in C */ char* str_append_chr(char* string, char append) { char* newstring = ""; diff --git a/test/test_graphs/example.cc b/test/test_graphs/example.cc deleted file mode 100644 index 6ab29590f3d..00000000000 --- a/test/test_graphs/example.cc +++ /dev/null @@ -1,100 +0,0 @@ -// migration_controller.cc -// Copyright (c) 2014 Jinglei Ren - -#include "migration_controller.h" - -using namespace std; - -void MigrationController::InputBlocks( - const vector& blocks) { - assert(nvm_pages_.empty()); - for (vector::const_iterator it = blocks.begin(); - it != blocks.end(); ++it) { - if (it->state == ATTEntry::CLEAN || it->state == ATTEntry::FREE) { - assert(it->epoch_writes == 0); - continue; - } - uint64_t block_addr = it->phy_tag << block_bits_; - NVMPage& p = nvm_pages_[PageAlign(block_addr)]; - p.epoch_reads += it->epoch_reads; - p.epoch_writes += it->epoch_writes; - - if 
(it->epoch_writes) { - p.blocks.insert(block_addr); - assert(p.blocks.size() <= page_blocks_); - } - } - dirty_nvm_pages_ += nvm_pages_.size(); -} - -bool MigrationController::ExtractNVMPage(NVMPageStats& stats, - Profiler& profiler) { - if (nvm_heap_.empty()) { - for (unordered_map::iterator it = nvm_pages_.begin(); - it != nvm_pages_.end(); ++it) { - double dr = it->second.blocks.size() / page_blocks_; - double wr = it->second.epoch_writes / page_blocks_; - nvm_heap_.push_back({it->first, dr, wr}); - - total_nvm_writes_ += it->second.epoch_writes; - dirty_nvm_blocks_ += it->second.blocks.size(); - } - make_heap(nvm_heap_.begin(), nvm_heap_.end()); - } - profiler.AddTableOp(); - - if (nvm_heap_.empty()) return false; - - stats = nvm_heap_.front(); - pop_heap(nvm_heap_.begin(), nvm_heap_.end()); - nvm_heap_.pop_back(); - return true; -} - -bool MigrationController::ExtractDRAMPage(DRAMPageStats& stats, - Profiler& profiler) { - if (dram_heap_.empty()) { - int dirts = 0; - for (unordered_map::iterator it = entries_.begin(); - it != entries_.end(); ++it) { - double wr = it->second.epoch_writes / page_blocks_; - dram_heap_.push_back({it->first, it->second.state, wr}); - - total_dram_writes_ += it->second.epoch_writes; - dirts += (it->second.epoch_writes ? 
1 : 0); - } - assert(dirts == dirty_entries_); - dirty_dram_pages_ += dirty_entries_; - - make_heap(dram_heap_.begin(), dram_heap_.end()); - } - profiler.AddTableOp(); - - if (dram_heap_.empty()) return false; - - stats = dram_heap_.front(); - pop_heap(dram_heap_.begin(), dram_heap_.end()); - dram_heap_.pop_back(); - return true; -} - -void MigrationController::Clear(Profiler& profiler) { - profiler.AddPageMoveInter(dirty_entries_); // epoch write-backs - for (PTTEntryIterator it = entries_.begin(); it != entries_.end(); ++it) { - it->second.epoch_reads = 0; - it->second.epoch_writes = 0; - if (it->second.state == PTTEntry::DIRTY_DIRECT) { - ShiftState(it->second, PTTEntry::CLEAN_DIRECT, Profiler::Overlap); - --dirty_entries_; - } else if (it->second.state == PTTEntry::DIRTY_STATIC) { - ShiftState(it->second, PTTEntry::CLEAN_STATIC, Profiler::Overlap); - --dirty_entries_; - } - } - profiler.AddTableOp(); - assert(dirty_entries_ == 0); - - nvm_pages_.clear(); - dram_heap_.clear(); - nvm_heap_.clear(); -} \ No newline at end of file diff --git a/test/test_graphs/example.patch b/test/test_graphs/example.patch deleted file mode 100644 index 0f835ec6469..00000000000 --- a/test/test_graphs/example.patch +++ /dev/null @@ -1,111 +0,0 @@ -@@ -5,6 +5,37 @@ - - using namespace std; - - +void MigrationController::FillNVMPageHeap() { - + for (unordered_map::iterator it = nvm_pages_.begin(); - + it != nvm_pages_.end(); ++it) { - + double dr = it->second.blocks.size() / page_blocks_; - + double wr = it->second.epoch_writes / page_blocks_; - + nvm_heap_.push_back({it->first, dr, wr}); - + - + total_nvm_writes_ += it->second.epoch_writes; - + dirty_nvm_blocks_ += it->second.blocks.size(); - + } - + make_heap(nvm_heap_.begin(), nvm_heap_.end()); - + nvm_heap_filled_ = true; - +} - + - +void MigrationController::FillDRAMPageHeap() { - + int dirts = 0; - + for (unordered_map::iterator it = entries_.begin(); - + it != entries_.end(); ++it) { - + double wr = it->second.epoch_writes / 
page_blocks_; - + dram_heap_.push_back({it->first, it->second.state, wr}); - + - + total_dram_writes_ += it->second.epoch_writes; - + dirts += (it->second.epoch_writes ? 1 : 0); - + } - + assert(dirts == dirty_entries_); - + dirty_dram_pages_ += dirty_entries_; - + - + make_heap(dram_heap_.begin(), dram_heap_.end()); - + dram_heap_filled_ = true; - +} - + - void MigrationController::InputBlocks( - const vector& blocks) { - assert(nvm_pages_.empty()); - @@ -25,56 +56,37 @@ void MigrationController::InputBlocks( - } - } - dirty_nvm_pages_ += nvm_pages_.size(); - + - + FillNVMPageHeap(); - + FillDRAMPageHeap(); - } - - bool MigrationController::ExtractNVMPage(NVMPageStats& stats, - Profiler& profiler) { - - if (nvm_heap_.empty()) { - - for (unordered_map::iterator it = nvm_pages_.begin(); - - it != nvm_pages_.end(); ++it) { - - double dr = it->second.blocks.size() / page_blocks_; - - double wr = it->second.epoch_writes / page_blocks_; - - nvm_heap_.push_back({it->first, dr, wr}); - - - - total_nvm_writes_ += it->second.epoch_writes; - - dirty_nvm_blocks_ += it->second.blocks.size(); - - } - - make_heap(nvm_heap_.begin(), nvm_heap_.end()); - - } - - profiler.AddTableOp(); - + assert(nvm_heap_filled_); - - if (nvm_heap_.empty()) return false; - - stats = nvm_heap_.front(); - pop_heap(nvm_heap_.begin(), nvm_heap_.end()); - nvm_heap_.pop_back(); - + - + profiler.AddTableOp(); - return true; - } - - bool MigrationController::ExtractDRAMPage(DRAMPageStats& stats, - Profiler& profiler) { - - if (dram_heap_.empty()) { - - int dirts = 0; - - for (unordered_map::iterator it = entries_.begin(); - - it != entries_.end(); ++it) { - - double wr = it->second.epoch_writes / page_blocks_; - - dram_heap_.push_back({it->first, it->second.state, wr}); - - - - total_dram_writes_ += it->second.epoch_writes; - - dirts += (it->second.epoch_writes ? 
1 : 0); - - } - - assert(dirts == dirty_entries_); - - dirty_dram_pages_ += dirty_entries_; - - - - make_heap(dram_heap_.begin(), dram_heap_.end()); - - } - + assert(dram_heap_filled_); - profiler.AddTableOp(); - - if (dram_heap_.empty()) return false; - - stats = dram_heap_.front(); - pop_heap(dram_heap_.begin(), dram_heap_.end()); - dram_heap_.pop_back(); - + - + profiler.AddTableOp(); - return true; - } - - @@ -97,4 +109,6 @@ void MigrationController::Clear(Profiler& profiler) { - nvm_pages_.clear(); - dram_heap_.clear(); - nvm_heap_.clear(); - + dram_heap_filled_ = false; - + nvm_heap_filled_ = false; - } \ No newline at end of file diff --git a/test/test_graphs/example2.patch b/test/test_graphs/example2.patch deleted file mode 100644 index ed3efe78b7f..00000000000 --- a/test/test_graphs/example2.patch +++ /dev/null @@ -1,8 +0,0 @@ -@@ -1 +1,7 @@ -+/* -+ * linux/lib/errno.c -+ * -+ * (C) 1991 Linus Torvalds -+ */ -+ - int errno; diff --git a/test/test_graphs/example3.patch b/test/test_graphs/example3.patch deleted file mode 100644 index f094bbc71f6..00000000000 --- a/test/test_graphs/example3.patch +++ /dev/null @@ -1,32 +0,0 @@ -@@ -6,12 +6,27 @@ int str_len(char *string) - return count - string; - } - --/* added in A*/ --char* str_append(char* string, char* append) { -+/* str_append is deleted in B */ -+ -+/* added in B */ -+char* str_append_chr(char* string, char append) { - char* newstring = NULL; -- size_t needed = snprintf(NULL, 0, "%s%s", string, append); -+ size_t needed = snprintf(NULL, 0, "%s%c", string, append); - newstring = malloc(needed); -- sprintf(newstring, "%s%s", string, append); -+ sprintf(newstring, "%s%c", string, append); - return newstring; - } - -+/* added in B */ -+int str_equals(char *equal1, char *eqaul2) -+{ -+ while(*equal1==*eqaul2) -+ { -+ if ( *equal1 == '\0' || *eqaul2 == '\0' ){break;} -+ equal1++; -+ eqaul2++; -+ } -+ if(*eqaul1 == '\0' && *eqaul2 == '\0' ){return 0;} -+ else {return -1}; -+} -+ \ No newline at end of file 
diff --git a/test/test_graphs/example4.patch b/test/test_graphs/example4.patch deleted file mode 100644 index 4c72485b6f2..00000000000 --- a/test/test_graphs/example4.patch +++ /dev/null @@ -1,28 +0,0 @@ -@@ -0,0 +1,27 @@ -+/* added in G */ -+char* str_replace(char* search, char* replace, char* subject) { -+ char* newstring = ""; -+ int i = 0; -+ for(i = 0; i < str_len(subject); i++) { -+ if (subject[i] == search[0]) { -+ int e = 0; -+ char* calc = ""; -+ for(e = 0; e < str_len(search); e++) { -+ if(subject[i+e] == search[e]) { -+ calc = str_append_chr(calc, search[e]); -+ } -+ } -+ if (str_equals(search, calc) == 0) { -+ newstring = str_append(newstring, replace); -+ i = i + str_len (search)-1; -+ } -+ else { -+ newstring = str_append_chr(newstring, subject[i]); -+ } -+ } -+ else { -+ newstring = str_append_chr(newstring, subject[i]); -+ } -+ } -+ return newstring; -+} \ No newline at end of file diff --git a/test/test_graphs/test_analyzer.py b/test/test_graphs/test_analyzer.py deleted file mode 100644 index 21f0d35883c..00000000000 --- a/test/test_graphs/test_analyzer.py +++ /dev/null @@ -1,139 +0,0 @@ -import os -import pytest -import pickle -import subprocess -from persper.graphs.c import CGraphServer -from persper.graphs.analyzer import Analyzer -from persper.graphs.iterator import RepoIterator -from persper.util.path import root_path -from persper.graphs.graph_server import C_FILENAME_REGEXES - - -@pytest.fixture(scope='module') -def az(): - # build the repo first if not exists yet - repo_path = os.path.join(root_path, 'repos/test_feature_branch') - script_path = os.path.join(root_path, 'tools/repo_creater/create_repo.py') - test_src_path = os.path.join(root_path, 'test/test_feature_branch') - if not os.path.isdir(repo_path): - cmd = '{} {}'.format(script_path, test_src_path) - subprocess.call(cmd, shell=True) - - return Analyzer(repo_path, CGraphServer(C_FILENAME_REGEXES)) - - -def assert_graphs_equal(g1, g2): - assert(set(g1.nodes()) == set(g2.nodes())) - 
assert(set(g1.edges()) == set(g2.edges())) - for n in g1: - print(n) - assert(g1.node[n] == g2.node[n]) - - -def assert_analyzer_equal(az1, az2): - assert(az1.history == az2.history) - assert_graphs_equal(az1.graph_server.get_graph(), az2.graph_server.get_graph()) - - -def assert_graph_match_history(az): - # total edits data stored in the graph should match az.history - master_commits, _ = az.ri.iter(from_beginning=True) - master_sha_set = set([c.hexsha for c in master_commits]) - g = az.graph_server.get_graph() - for func in g.nodes(): - print(func) - func_sum = 0 - for sha in az.history: - if sha in master_sha_set and func in az.history[sha]: - func_sum += az.history[sha][func] - if g.node[func]['defined']: - assert(func_sum == g.node[func]['num_lines']) - - -def test_az_basic(az): - az.analyze(from_beginning=True, into_branches=True) - assert_graph_match_history(az) - - history_truth = { - 'K': {'display': 5}, - 'F': {'display': 14, 'count': 12}, - 'E': {'append': 29, 'add': 11}, - 'D': {'str_replace': 26}, - 'C': {'str_append_chr': 34, 'str_equals': 1}, - 'B': {'str_append': 9, 'str_append_chr': 7, 'str_equals': 11}, - 'A': {'str_append': 7, 'str_len': 6}, - - # branch J from commit A, merge back through F - 'J': {'count': 12, 'display': 14}, - - # branch G from commit B, merge back through D - 'G': {'str_equals': 1, 'str_replace': 26}, - - # branch H from commit D, merge back through E - 'I': {'add': 5, 'append': 35, 'insert': 25}, - 'H': {'add': 16, 'append': 12, 'insert': 25}, - } - - for commit in az.ri.repo.iter_commits(): - assert(az.history[commit.hexsha] == - history_truth[commit.message.strip()]) - - edges_truth = [ - ('append', 'free'), - ('display', 'printf'), - ('str_replace', 'str_append_chr'), - ('str_replace', 'str_equals'), - ('str_replace', 'str_len'), - ('str_replace', 'str_append'), - ('str_append_chr', 'str_append_chr'), - ('str_append_chr', 'str_equals'), - ('str_append_chr', 'str_len'), - ('str_append_chr', 'str_append'), - ('add', 
'malloc') - ] - assert(set(az.graph_server.get_graph().edges()) == set(edges_truth)) - - -def test_analyze_interface(az): - # test various ways to invoke process function - az.analyze(from_beginning=True, into_branches=True) - - repo_path = os.path.join(root_path, 'repos/test_feature_branch') - az1 = Analyzer(repo_path, CGraphServer(C_FILENAME_REGEXES)) - # A B - az1.analyze(from_beginning=True, num_commits=2, into_branches=True) - # C D - az1.analyze(continue_iter=True, num_commits=2, into_branches=True) - # E F K - az1.analyze(continue_iter=True, num_commits=3, into_branches=True) - # should see "The range specified is empty, terminated." - az1.analyze(continue_iter=True, num_commits=1, into_branches=True) - assert_analyzer_equal(az1, az) - - az2 = Analyzer(repo_path, CGraphServer(C_FILENAME_REGEXES)) - ri = RepoIterator(repo_path) - commits, _ = ri.iter(from_beginning=True) - assert(len(commits) == 7) - # should see "No history exists yet, terminated." - az2.analyze(continue_iter=True, num_commits=1, into_branches=True) - # A B C - az2.analyze(from_beginning=True, num_commits=3, into_branches=True) - # D E F - az2.analyze(from_beginning=True, - end_commit_sha=commits[5].hexsha, - into_branches=True) - # K - az2.analyze(from_beginning=True, - end_commit_sha=commits[6].hexsha, - into_branches=True) - assert_analyzer_equal(az2, az) - - -def test_save(az): - az.analyze(from_beginning=True, into_branches=True) - filename = "test_save_g.pickle" - az.save(filename) - with open(filename, 'rb') as f: - az1 = pickle.load(f) - os.remove(filename) - assert_analyzer_equal(az, az1) diff --git a/test/test_graphs/test_analyzer_js.py b/test/test_graphs/test_analyzer_js.py deleted file mode 100644 index e5739ddd587..00000000000 --- a/test/test_graphs/test_analyzer_js.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import time -import pytest -import subprocess -from persper.graphs.graph_server import JS_FILENAME_REGEXES -from persper.graphs.graph_server_http import GraphServerHttp 
-from persper.graphs.analyzer import Analyzer -from persper.util.path import root_path - -# Use a port other than the default 3000 in case of collision -server_port = 3002 -server_path = os.path.join(root_path, 'contribs/js-callgraph/src/app.js') - - -@pytest.fixture(scope='module') -def az(): - """ Build the test repo if not already exists - - Args: - repo_path - A string, path to the to-be-built test repo - script_path - A string, path to the repo creator script - test_src_path - A string, path to the dir to be passed to repo creator - """ - repo_path = os.path.join(root_path, 'repos/js_test_repo') - script_path = os.path.join(root_path, 'tools/repo_creater/create_repo.py') - test_src_path = os.path.join(root_path, 'test/js_test_repo') - server_addr = 'http://localhost:%d' % server_port - - if not os.path.isdir(repo_path): - cmd = '{} {}'.format(script_path, test_src_path) - subprocess.call(cmd, shell=True) - - return Analyzer(repo_path, GraphServerHttp(server_addr, JS_FILENAME_REGEXES)) - - -def assert_graph_match_history(az: Analyzer): - # total edits data stored in the graph should match az.history - g = az.graph_server.get_graph() - for fid in g.nodes(): - print(fid) - total_edits = 0 - for sha in az.history: - if fid in az.history[sha]: - total_edits += az.history[sha][fid] - assert(total_edits == g.node[fid]['num_lines']) - - -def test_az(az: Analyzer): - my_env = os.environ.copy() - my_env["PORT"] = str(server_port) - p = subprocess.Popen(['node', server_path], env=my_env) - - try: - # wait for the server to spin up - time.sleep(1.0) - az.graph_server.reset_graph() - az.analyze() - # assert_graph_match_history(az) - - history_truth = { - 'C': {'main.js:funcB:9:12': 1, - 'main.js:global': 1, - 'main.js:main:7:16': 1}, - 'B': {'main.js:funcB:9:11': 3, - 'main.js:global': 7, - 'main.js:main:7:15': 7}, - 'A': {'main.js:funcA:3:5': 3, - 'main.js:main:7:10': 4, - 'main.js:global': 12} - } - - for commit in az.ri.repo.iter_commits(): - 
assert(az.history[commit.hexsha] == - history_truth[commit.message.strip()]) - - edges_truth = [ - ('main.js:funcB:9:12', 'Native:Window_prototype_print'), - ('main.js:funcB:9:12', 'main.js:funcA:3:5'), - ('main.js:funcA:3:5', 'Native:Window_prototype_print'), - ('main.js:main:7:16', 'main.js:funcB:9:12'), - ('main.js:main:7:16', 'main.js:funcA:3:5'), - ('main.js:global', 'main.js:main:7:16') - ] - assert(set(az.graph_server.get_graph().edges()) == set(edges_truth)) - - finally: - p.terminate() diff --git a/test/test_graphs/test_callcommitgraph.py b/test/test_graphs/test_callcommitgraph.py deleted file mode 100644 index b4bafafc1eb..00000000000 --- a/test/test_graphs/test_callcommitgraph.py +++ /dev/null @@ -1,125 +0,0 @@ -import os -import pytest -import pickle -import subprocess -from persper.graphs.call_commit_graph import CallCommitGraph -from persper.graphs.call_commit_graph import _inverse_diff_result -from persper.util.path import root_path - - -@pytest.fixture(scope='module') -def g(): - # build the repo first if not exists yet - repo_path = os.path.join(root_path, 'repos/test_feature_branch') - script_path = os.path.join(root_path, 'tools/repo_creater/create_repo.py') - test_src_path = os.path.join(root_path, 'test/test_feature_branch') - if not os.path.isdir(repo_path): - cmd = '{} {}'.format(script_path, test_src_path) - subprocess.call(cmd, shell=True) - - g = CallCommitGraph(repo_path) - g.process(from_beginning=True, verbose=True, into_branches=True) - return g - - -def test_callcommitgraph(g): - history_truth = { - 'J': {'count': 12, 'display': 14}, - 'I': {'add': 5, 'append': 35}, - 'E': {}, - 'G': {'str_equals': 1, 'str_replace': 26}, - 'D': {}, - 'H': {'add': 16, 'append': 12}, - 'F': {}, - 'A': {'str_append': 7, 'str_len': 6}, - 'K': {'display': 5}, - 'C': {'str_append_chr': 34, 'str_equals': 1}, - 'B': {'str_append': 9, 'str_append_chr': 7, 'str_equals': 11} - } - - for commit in g.repo.iter_commits(): - assert(g.history[commit.hexsha] == - 
history_truth[commit.message.strip()]) - - edges_truth = [ - ('append', 'free'), - ('display', 'printf'), - ('str_replace', 'str_append_chr'), - ('str_replace', 'str_equals'), - ('str_replace', 'str_len'), - ('str_replace', 'str_append'), - ('str_append_chr', 'str_append_chr'), - ('str_append_chr', 'str_equals'), - ('str_append_chr', 'str_len'), - ('str_append_chr', 'str_append'), - ('add', 'malloc') - ] - assert(set(g.G.edges()) == set(edges_truth)) - - -def test_inverse_diff(): - # view parsing ground truth here - # https://github.com/basicthinker/Sexain-MemController/commit/f050c6f6dd4b1d3626574b0d23bb41125f7b75ca - adds_dels = ( - [[7, 31], [27, 3], [44, 1], [50, 2], [70, 1], [77, 2], [99, 2]], - [[32, 44], [56, 70]] - ) - inv_truth = ( - [[65, 13], [79, 15]], - [[8, 38], [59, 61], [66, 66], [73, 74], [80, 80], [88, 89], [112, 113]] - ) - - inv_result = _inverse_diff_result(*adds_dels) - assert(inv_truth == inv_result) - - -def assert_graphs_equal(G1, G2): - assert(set(G1.nodes()) == set(G2.nodes())) - assert(set(G1.edges()) == set(G2.edges())) - for n in G1: - assert(G1.node[n] == G2.node[n]) - - -def assert_callcommitgraphs_equal(g1, g2): - assert_graphs_equal(g1.G, g2.G) - assert(g1.history == g2.history) - assert(g1.exts == g2.exts) - - -def test_process_interface(g): - """test various ways to invoke process function""" - repo_path = os.path.join(root_path, 'repos/test_feature_branch') - g1 = CallCommitGraph(repo_path) - # A B - g1.process(from_beginning=True, into_branches=True, num_commits=2) - # C D - g1.process(from_last_processed=True, into_branches=True, num_commits=2) - # E F K - g1.process(from_last_processed=True, into_branches=True, num_commits=3) - # should see "The range specified is empty, terminated." - g1.process(from_last_processed=True, into_branches=True, num_commits=1) - assert_callcommitgraphs_equal(g1, g) - - g2 = CallCommitGraph(repo_path) - # should see "No history exists yet, terminated." 
- g2.process(from_last_processed=True, into_branches=True, num_commits=1) - # A B C - g2.process(from_beginning=True, into_branches=True, num_commits=3) - # D E F - g2.process(from_beginning=True, - into_branches=True, - end_commit_sha=g.commits[5].hexsha) - # K - g2.process(from_beginning=True, - into_branches=True, - end_commit_sha=g.commits[6].hexsha) - assert_callcommitgraphs_equal(g2, g) - - -def test_save(g): - fname = "test_save_g.pickle" - g.save(fname) - with open(fname, 'rb') as f: - gp = pickle.load(f) - os.remove(fname) - assert_callcommitgraphs_equal(g, gp) diff --git a/test/test_graphs/test_detect_change.py b/test/test_graphs/test_detect_change.py deleted file mode 100644 index 595d536f24f..00000000000 --- a/test/test_graphs/test_detect_change.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -from persper.graphs.patch_parser import PatchParser -from persper.graphs.detect_change import get_changed_functions -from persper.graphs.call_graph.cpp import get_func_ranges_cpp -from persper.graphs.srcml import transform_src_to_tree - -dir_path = os.path.dirname(os.path.abspath(__file__)) - - -def test_detect_change(): - parser = PatchParser() - - # view parsing ground truth here - # https://github.com/basicthinker/Sexain-MemController/commit/f050c6f6dd4b1d3626574b0d23bb41125f7b75ca - parsing_truth = ( - [[7, 31], [27, 3], [44, 1], [50, 2], [70, 1], [77, 2], [99, 2]], - [[32, 44], [56, 70]] - ) - - # view function ranges ground truth here - # https://github.com/basicthinker/Sexain-MemController/blob/5b8886d9da3bb07140bfb1ff2d2b215b2dff584b/migration_controller.cc - func_ranges_truth = ( - ['MigrationController::InputBlocks', - 'MigrationController::ExtractNVMPage', - 'MigrationController::ExtractDRAMPage', - 'MigrationController::Clear'], - [[8, 28], [30, 52], [54, 79], [81, 100]] - ) - - changed_result = { - 'MigrationController::Clear': 2, - 'MigrationController::ExtractDRAMPage': 18, - 'MigrationController::ExtractNVMPage': 16, - 
'MigrationController::InputBlocks': 3 - } - - with open(os.path.join(dir_path, 'example.patch'), 'r') as f: - example_patch = f.read() - parsing_result = parser.parse(example_patch) - assert(parsing_result == parsing_truth) - - with open(os.path.join(dir_path, 'example.cc'), 'r') as f: - root = transform_src_to_tree(f.read(), ext='.cc') - func_ranges_result = get_func_ranges_cpp(root) - assert(func_ranges_result == func_ranges_truth) - - assert(changed_result == get_changed_functions( - *func_ranges_result, *parsing_result)) - -def test_patch_parser(): - parser = PatchParser() - - patch2_truth = ( - [[0, 6]], - [] - ) - with open(os.path.join(dir_path, 'example2.patch'), 'r') as f: - example2_patch = f.read() - parsing_result = parser.parse(example2_patch) - assert(parsing_result == patch2_truth) - - # view patch3_truth here - # https://github.com/UltimateBeaver/test_feature_branch/commit/caaac10f604ea7ac759c2147df8fb2b588ee2a27 - patch3_truth = ( - [[10, 4], [12, 1], [14, 1], [17, 13]], - [[9, 10], [12, 12], [14, 14]] - ) - with open(os.path.join(dir_path, 'example3.patch'), 'r') as f: - example3_patch = f.read() - parsing_result = parser.parse(example3_patch) - assert(parsing_result == patch3_truth) - - # view patch4_truth here - # https://github.com/UltimateBeaver/test_feature_branch/commit/364d5cc49aeb2e354da458924ce84c0ab731ac77 - patch4_truth = ( - [[0, 27]], - [] - ) - with open(os.path.join(dir_path, 'example4.patch'), 'r') as f: - example4_patch = f.read() - parsing_result = parser.parse(example4_patch) - assert(parsing_result == patch4_truth) - - - - - diff --git a/test/test_graphs/test_inverse_diff.py b/test/test_graphs/test_inverse_diff.py deleted file mode 100644 index e0a390ad7ef..00000000000 --- a/test/test_graphs/test_inverse_diff.py +++ /dev/null @@ -1,18 +0,0 @@ -import pytest -from persper.graphs.inverse_diff import inverse_diff - - -def test_inverse_diff(): - # view parsing ground truth here - # 
https://github.com/basicthinker/Sexain-MemController/commit/f050c6f6dd4b1d3626574b0d23bb41125f7b75ca - adds_dels = ( - [[7, 31], [27, 3], [44, 1], [50, 2], [70, 1], [77, 2], [99, 2]], - [[32, 44], [56, 70]] - ) - inv_truth = ( - [[65, 13], [79, 15]], - [[8, 38], [59, 61], [66, 66], [73, 74], [80, 80], [88, 89], [112, 113]] - ) - - inv_result = inverse_diff(*adds_dels) - assert(inv_truth == inv_result) diff --git a/test/test_graphs/test_iterator.py b/test/test_graphs/test_iterator.py deleted file mode 100644 index 418572122f5..00000000000 --- a/test/test_graphs/test_iterator.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -import pytest -import pickle -import subprocess -from persper.graphs.iterator import RepoIterator -from persper.util.path import root_path - - -def serialized_messages(commits): - return ' '.join([c.message.strip() for c in commits]) - - -@pytest.fixture(scope='module') -def ri(): - # build the repo first if not exists yet - repo_path = os.path.join(root_path, 'repos/test_processor') - script_path = os.path.join(root_path, 'tools/repo_creater/create_repo.py') - test_src_path = os.path.join(root_path, 'test/test_processor') - if not os.path.isdir(repo_path): - cmd = '{} {}'.format(script_path, test_src_path) - subprocess.call(cmd, shell=True) - - repo_path = os.path.join(root_path, 'repos/test_processor') - ri = RepoIterator(repo_path) - return ri - - -def test_iterator(ri): - commits, branch_commits = ri.iter(from_beginning=True, into_branches=True) - # from A to L - # use `git log --graph` to view ground truth - assert(len(ri.visited) == 12) - assert(len(commits) == 4) - assert(len(branch_commits) == 8) - assert(serialized_messages(commits) == 'D C B A') - assert(serialized_messages(branch_commits) == 'G F E J I H L K') - - -def test_continue_iter(ri): - commits, branch_commits = ri.iter( - from_beginning=True, num_commits=2, into_branches=True) - assert(serialized_messages(commits) == 'B A') - assert(serialized_messages(branch_commits) == '') - 
commits2, branch_commits2 = ri.iter( - continue_iter=True, num_commits=2, into_branches=True) - assert(serialized_messages(commits2) == 'D C') - assert(serialized_messages(branch_commits2) == 'G F E J I H L K') - - -def test_rev(ri): - commits, branch_commits = ri.iter(rev='C', into_branches=True) - assert(serialized_messages(commits) == 'C B A') - assert(serialized_messages(branch_commits) == '') - commits2, branch_commits2 = ri.iter( - continue_iter=True, end_commit_sha='D', into_branches=True) - assert(serialized_messages(commits2) == 'D') - assert(serialized_messages(branch_commits2) == 'G F E J I H L K') - - -def test_iter_twice(ri): - commits, branch_commits = ri.iter(from_beginning=True, into_branches=True) - commits2, branch_commits2 = ri.iter( - from_beginning=True, into_branches=True) - assert(commits == commits2) - assert(branch_commits == branch_commits2) diff --git a/test/test_graphs/test_map_id.py b/test/test_graphs/test_map_id.py deleted file mode 100644 index be93dae433a..00000000000 --- a/test/test_graphs/test_map_id.py +++ /dev/null @@ -1,46 +0,0 @@ -import os -import subprocess -from persper.graphs.graph_server import JS_FILENAME_REGEXES -from persper.graphs.graph_server_http import GraphServerHttp -from persper.graphs.analyzer import Analyzer -from persper.util.path import root_path - - -def test_map_id(): - repo_path = os.path.join(root_path, 'repos/js_test_repo') - script_path = os.path.join(root_path, 'tools/repo_creater/create_repo.py') - test_src_path = os.path.join(root_path, 'test/js_test_repo') - if not os.path.isdir(repo_path): - cmd = '{} {}'.format(script_path, test_src_path) - subprocess.call(cmd, shell=True) - - server_addr = 'http://localhost:3000' - az = Analyzer(repo_path, GraphServerHttp(server_addr, JS_FILENAME_REGEXES)) - - az.ordered_shas = ['c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7'] - - az.id_map = { - 'c1': {'A': 'B'}, - 'c2': {'B': 'C', 'E': 'F'}, - 'c3': {'C': 'D', 'F': 'G'}, - 'c4': {'G': 'H'}, - 'c5': {'D': 'I', 'J': 
'K'}, - 'c6': {'I': 'B', 'H': 'E'}, # make two cycles - 'c7': {'B': 'L'} # remove a cycle - } - - final_map_truth = { - 'A': 'L', - 'B': 'L', - 'C': 'L', - 'D': 'L', - 'I': 'L', - 'E': 'E', - 'F': 'E', - 'G': 'E', - 'H': 'E', - 'J': 'K' - } - - final_map = az.aggregate_id_map() - assert(final_map_truth == final_map) diff --git a/test/test_graphs/test_processor.py b/test/test_graphs/test_processor.py deleted file mode 100644 index 8260656e2bb..00000000000 --- a/test/test_graphs/test_processor.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import pytest -import subprocess -from persper.graphs.processor import Processor -from persper.util.path import root_path - - -def setup_module(module): - # build the repo first if not exists yet - repo_path = os.path.join(root_path, 'repos/test_processor') - script_path = os.path.join(root_path, 'tools/repo_creater/create_repo.py') - test_src_path = os.path.join(root_path, 'test/test_processor') - if not os.path.isdir(repo_path): - cmd = '{} {}'.format(script_path, test_src_path) - subprocess.call(cmd, shell=True) - - -def test_processor(capsys): - repo_path = os.path.join(root_path, 'repos/test_processor') - p = Processor(repo_path) - p.process(from_beginning=True, into_branches=True) - # from A to L - assert(len(p.visited) == 12) - out, _ = capsys.readouterr() - print(out) - assert("Commit No.8 Branch No.3" in out) diff --git a/test/test_processor/A/A b/test/test_processor/A/A deleted file mode 100644 index f70f10e4db1..00000000000 --- a/test/test_processor/A/A +++ /dev/null @@ -1 +0,0 @@ -A diff --git a/test/test_processor/B/B b/test/test_processor/B/B deleted file mode 100644 index 223b7836fb1..00000000000 --- a/test/test_processor/B/B +++ /dev/null @@ -1 +0,0 @@ -B diff --git a/test/test_processor/C/C b/test/test_processor/C/C deleted file mode 100644 index 3cc58df8375..00000000000 --- a/test/test_processor/C/C +++ /dev/null @@ -1 +0,0 @@ -C diff --git a/test/test_processor/D/D b/test/test_processor/D/D deleted file mode 
100644 index 17848105018..00000000000 --- a/test/test_processor/D/D +++ /dev/null @@ -1 +0,0 @@ -D diff --git a/test/test_processor/E/E b/test/test_processor/E/E deleted file mode 100644 index 1c507261389..00000000000 --- a/test/test_processor/E/E +++ /dev/null @@ -1 +0,0 @@ -E diff --git a/test/test_processor/F/F b/test/test_processor/F/F deleted file mode 100644 index cf84443e49e..00000000000 --- a/test/test_processor/F/F +++ /dev/null @@ -1 +0,0 @@ -F diff --git a/test/test_processor/G/G b/test/test_processor/G/G deleted file mode 100644 index fd792352985..00000000000 --- a/test/test_processor/G/G +++ /dev/null @@ -1 +0,0 @@ -G diff --git a/test/test_processor/H/H b/test/test_processor/H/H deleted file mode 100644 index a9edc74f384..00000000000 --- a/test/test_processor/H/H +++ /dev/null @@ -1 +0,0 @@ -H diff --git a/test/test_processor/I/I b/test/test_processor/I/I deleted file mode 100644 index db1a5a09f7d..00000000000 --- a/test/test_processor/I/I +++ /dev/null @@ -1 +0,0 @@ -I diff --git a/test/test_processor/J/J b/test/test_processor/J/J deleted file mode 100644 index c8f145fff1f..00000000000 --- a/test/test_processor/J/J +++ /dev/null @@ -1 +0,0 @@ -J diff --git a/test/test_processor/K/K b/test/test_processor/K/K deleted file mode 100644 index 7692e7d41c0..00000000000 --- a/test/test_processor/K/K +++ /dev/null @@ -1 +0,0 @@ -K diff --git a/test/test_processor/L/L b/test/test_processor/L/L deleted file mode 100644 index 05bef1a55a5..00000000000 --- a/test/test_processor/L/L +++ /dev/null @@ -1 +0,0 @@ -L diff --git a/test/test_processor/cg.dot b/test/test_processor/cg.dot deleted file mode 100644 index 693bc891aff..00000000000 --- a/test/test_processor/cg.dot +++ /dev/null @@ -1,6 +0,0 @@ -digraph test_processor { - A -> B -> C -> D; - B -> E -> F -> G -> D; - A -> H -> I -> J -> G ; - A -> K -> L -> J ; -} \ No newline at end of file diff --git a/tools/repo_creater/create_repo.py b/tools/repo_creater/create_repo.py index 4dcd142e10d..18758a4fbb7 100755 
--- a/tools/repo_creater/create_repo.py +++ b/tools/repo_creater/create_repo.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- import os +import stat import subprocess import sys import shutil @@ -10,11 +11,29 @@ from git import Repo from persper.util.path import root_path +def rmtree_compat(top): + """ + A Windows-compatible implementation of rmtree. + This implementation guarantees the files have appropriate permissions to delete, + and would avoid Access Denied errors. + """ + if os.name != "nt": + shutil.rmtree(top) + return + + for root, dirs, files in os.walk(top, topdown=False): + for name in files: + filename = os.path.join(root, name) + os.chmod(filename, stat.S_IWUSR) + os.remove(filename) + for name in dirs: + os.rmdir(os.path.join(root, name)) + os.rmdir(top) def make_new_dir(dir_path): """delete old directory first if exists""" if os.path.exists(dir_path): - shutil.rmtree(dir_path) + rmtree_compat(dir_path) os.makedirs(dir_path)