diff --git "a/data/indices/sentence_window/5e146e7aac/docstore.json" "b/data/indices/sentence_window/5e146e7aac/docstore.json" new file mode 100644--- /dev/null +++ "b/data/indices/sentence_window/5e146e7aac/docstore.json" @@ -0,0 +1 @@ +{"docstore/metadata": {"89dabcdf-f292-43c7-abfd-eb0cc1cd36ef": {"doc_hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a"}, "aabd4e55-6fdd-4c8b-a0d3-a15f80ae25a2": {"doc_hash": "8eec7b6ea451ee7edf801e3d54f6fa03097989b495d09ab39dba98e7d23e00c2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bab022a3-166f-40d5-878c-8884ec28a68e": {"doc_hash": "caebfb40e4970fe9277e1ed1ef212480ec1fc80c5a95e570cb814f799273793d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "67c22b47-4006-4279-84ac-8ed8a95033c3": {"doc_hash": "294d0478472d92e69b8bee2a1d731be07228efdba6016e529e19c898eaa34392", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f031e3b4-2b2c-4f36-994a-c6061ff9dfa9": {"doc_hash": "00baca395070115343c3e7fe063eb3349608f025b233a4b7fd8571803261dd22", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cccfb5cd-b1bf-48c4-bb6e-768bcc0e2a35": {"doc_hash": "a8e8e96bc810bc6e5f775379cf4c4cf9536bf41d4a41bd2c4e96375ffc279bbb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dd0fddcb-cf59-4229-9761-07fe375dcf93": {"doc_hash": "499139b516407ffb933efbadf227589d08c1cc5cd1073ba6f24bf589ef365a7e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "978fa62b-6aa1-406e-9b9f-aeeae892855e": {"doc_hash": "437045979365835b84692fce686a7c1128c7587e4b85cd83924a947dde3be031", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a4081bcd-845c-4af3-8367-4566f59e84ea": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2bcb37eb-15d9-45b0-90a4-9839a4e03171": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d0cd9d4f-ac2b-4369-b4fb-2364d6f6443f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "783934d5-5c03-4aa3-91df-0511ee3fe295": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f1537c3f-b20d-4906-9223-ba4db80d26ea": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2ec11b97-ca4a-4f04-af28-a28a973588ad": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "70a3da41-6cec-4681-ba3c-49e2435cf197": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e65f4f16-a9cb-4834-83a9-c535d8c4a8f4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dda1c6a3-28f9-4bda-b62f-dff4b810185d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d6392f76-98df-4a4f-8c57-87fc17bc43d4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "101ce625-0317-4b48-9c7d-ffa14b7ff2e4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"612e34a5-f745-45ef-a91e-61b5d08ab91f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d896c04e-1e3b-43d7-9126-c4022852cae6": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0d478908-16c9-4bee-a1f6-a0b09a03e809": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "caa8c289-2580-416c-820e-ca38387fb3f3": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2f207ee1-2319-43b0-8519-dc7a066d9635": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2082ffbc-50a6-4245-869e-13f6cd05b84e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "507c7436-6017-4528-a1d7-2704c59e25f7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "98b3c0f2-3971-411e-b1ba-debf0b43e083": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "878cc548-b87a-44d0-a6db-215e183ce7c2": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "18c60443-514d-4459-84d8-360fd8241f31": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cf6fb891-cac2-4509-87d1-65d0fad1ea5b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "764726e7-9acd-4594-b590-a42270feea79": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "27c57ac9-1cb3-493b-9c02-f2d0b28855e4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7322f358-1948-4f55-9aea-a451de512f2b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1cd51d40-0a9b-4f77-8b36-da9f272044a3": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4458f71f-836e-41d1-b9f2-183e5878227d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7dc34b2c-b910-4687-9c1a-94cf4247e83f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "94e5821e-8cb8-4bfa-b65c-bc7e6aecf565": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6b2b9f02-004d-49b1-ba1e-1aef1cf72c91": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "aeaa56ff-0b5b-4c6d-937c-cd35745872f1": {"doc_hash": "43268e21e2d03b5c25f4dd7ff41d63c7f3696e08f44c2021cd6e4b9c3226895b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"59bf8388-9103-4ffa-a97d-a353069b2ca9": {"doc_hash": "ef785d8137998c8f510e487d8671be757fff1a8b9c741e7b172f1f5e45e7fa23", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e013a9c5-8dcf-458a-ad54-d8b12410470e": {"doc_hash": "b0edb7880f02c59724b5f6addd8e6f6622755492f57753b461b75ac1ef1dd95a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5a46f5e9-219b-45d0-b79c-cce4a88153a1": {"doc_hash": "28a6a3cf3b9bf043a0afcab8b19bd890e7d6b880d35781cea621f08aa470ad40", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0212d949-c329-4c0e-b0f3-739a42833930": {"doc_hash": "f1c689e1003608d8ed2e7d19585dc8a087d91e758e80a1339f0cab427a24a2c4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fdaa9304-1361-4b1f-91eb-74fe75240fe9": {"doc_hash": "dbf4fd184044c3a4e958bcdf62f2eecb39192445e5f0e1c7124987eeb2f6163b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9f248ad9-d6d8-4c23-80e8-0be85c2512c1": {"doc_hash": "f293f9191016097593cd09ad21bfb39cbd427c46b1a779eeff4d2363f5574f4c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "46e47b4f-618e-4a8b-bde3-d6e2db0f4434": {"doc_hash": "0a5b1a294726508259852fb4f7c9c7e253f340ad4976c48898cec1cd8e0d0f92", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6904607e-ebd7-4584-b0e4-6901a6ded5c9": {"doc_hash": "78f9b10cdf5a27699f17bc59e9eeb7ed5e4dfb41891d838161785cf2e646afed", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "189d3591-5eb0-4e8c-8083-15146d6f39c5": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2663141b-9f22-4456-bf58-ce45376a82ee": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f14aa0ab-e7f4-4c64-86a4-e51bc7cf6c55": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1d5e2f44-030c-4b16-9486-3821ba0f64f5": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "72f7a9fe-d9b8-47df-8860-8821a6a795ce": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0d7e9c5b-69b3-40f2-bbac-65da55dc8f51": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1357de3a-3304-402b-9cc3-c41c92089f29": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "38583da1-6a7c-459e-b2dd-6a3bb3f7f9b3": {"doc_hash": "fef319934aa8a1756b6e6f0408168148725ca3dd65ed399f9491c7f6b12f8ee2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a7ae8c96-7b8b-4d67-96b1-3fd93962b469": {"doc_hash": "7b56372d4725aa773c707ff5bf5d23be96163a2f4c5a929a53177fdee1495010", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b8f64b78-94dd-4340-a0ef-bcc6f67c20e1": {"doc_hash": "d19a4636a2e81a169e378a46e30899528595028eef33d689966f94f8428ff4f8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1fed8e1b-2011-4eae-9beb-b25bcb5396a7": {"doc_hash": "298395b4f564f398b1967b8f20a3fba5b6779b6a362e0f3aa8dc4ca43732b385", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "79b39b0b-c58d-4515-b58c-4501287cd5a6": {"doc_hash": "2e004dcfbfa47d2b1a1fa973e5d50f01cce4e47773bc3d4efbc0d50f739f40ca", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"dbbb0fe6-8046-43e4-84bb-33efb81d9010": {"doc_hash": "04748b54ed90765b07a33509bbad447c4942010a78bc04c22913512e882a52ee", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a9e6fede-3839-4dcc-a97c-a31b3d738fe5": {"doc_hash": "30eb64ee5b6022054d3636faa2d655072942d32c843fb11708cda68d44dd6f60", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6f49a1ce-a01d-42dd-9f32-ee1dcaec4d7f": {"doc_hash": "03e8cd00f088f592f277f27c121af3a6d76b3e49ea56b26120e190c729747bad", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3c0dc0df-d398-402f-b60c-ea1b5eef7478": {"doc_hash": "e0ad0e73e6df7f046cce124410f4787734658f3ca2205640d51b04596f5eca69", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "babd73f5-1108-443d-9dd2-f9d573cb7337": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "21752a6d-37e6-4e72-8467-7c3aca49ffbd": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c6de1561-d737-4afe-addc-f4c1df3896f7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fe69ec8c-fe54-4cd6-8fd4-18a473c401c2": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4010c527-89ec-45f4-bc3b-0702fc15e693": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0b925135-db51-454a-abe8-4f1338dd016f": {"doc_hash": "50580be0be50c4b2325763259f932b4ae3d03678ca8dde737d0a1191c0324ff0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5a16f20f-94d5-46c9-8fba-d2c5653c4625": {"doc_hash": "9a8e6b075568606c26fd07e30ac1df296ccb7607112acbdc495e48c11c05f590", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "99084c8e-364e-4e73-b47a-be787bb2a107": {"doc_hash": "8239c1212269a05bb9a7cfef6aedabd999640ba0fee3597ae3fbf311e43d0f0e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6de4b45f-3d4e-4981-be81-49cdd631f2f9": {"doc_hash": "60670cb4e7947dab68c0f91bb5061629e00e0cc7ef859d331a4f5fb81a73d804", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ffec42bb-fed8-4d55-9de6-de0fdd96aa16": {"doc_hash": "b78e3e59fbcbca47a6d89851024a3b7efcc7be14d3b92f0d49665cb171836f88", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "17f35e00-841c-4ea7-8378-3f48fc651993": {"doc_hash": "2ad6f8f602c681c115c9e565d30c555e90c38a3319038b9290767cf067c99441", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4d24ae53-f918-4097-a23f-94c9be43125e": {"doc_hash": "a674ba89f9a75147ba7c6cd3b08f98ad8e955f66c81bd5dc89cda968b3549da3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e4072659-f32c-4460-9a09-8439bf6bdd70": {"doc_hash": "40b977a86f152abd436181309d54c4cecdbc2b04cc04d96c813f73385c084f7f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "efd429eb-de49-44f3-8264-fd7b30873af8": {"doc_hash": "8b28231a20632bfa3b8d3fd6e3a5013dc7d8d5a67cb28f5bedc7fa1e070cd280", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c69be104-16fd-4e86-8e3b-e9f02e15be3f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2ca64856-cdc6-4999-9ecb-efaa18d55ec6": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"fe5fcb31-e33f-4354-8bdc-aa94ac19d8c5": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ef907f66-da87-4b13-82c9-ab181afecbec": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9b0d6415-7740-493b-9a44-cd5a3aaf7d13": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "740217d9-52ed-4e40-a368-2dd50cc01d8d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ca795ca9-59ba-4997-912b-09ff52304745": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b7135b08-5e3e-4701-9d20-ce9468e826c7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a159840a-c64e-4402-a930-2a5554fdb450": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d4a07581-a9e4-4ad1-8e89-a38aac6f079a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "75ccfd70-2249-4b3e-94d7-6839cd83c778": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "08d2b254-bdde-4efd-8e7a-06f18cba1baa": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6c086bd0-2d3d-495e-bffa-33a55cdf36d9": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6acc0c95-d875-4613-8610-ef4015b413fd": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c45b8827-2c7b-4c4e-b08c-be963bcb4723": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "de581595-f4e7-493a-9c05-a8464b0e794c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bcbb8b0b-c9b5-4f44-a081-4efe37dad3aa": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "aa9fcfd9-a11a-4dd5-89de-815d3b83b0d5": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d5a23980-dd6b-4bc4-9de9-bdb0a64d3018": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cdb23195-ff6a-4fa8-a7a5-c6a07d352dac": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bdfda74e-24bb-4e95-a143-6a8e3467a638": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "aa58176d-ea29-406d-9bcf-5d34aa5dab89": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"0acb8cb1-6e16-4f99-880f-cf6dbe5bc867": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "834ed8e7-f596-4bbc-8560-54ecceb13371": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "093c16ee-170f-49d2-be11-dd5e16e84aeb": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "95e37b08-b3b6-4bb9-be1a-4c9351b77a3f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a789d0e5-367f-4420-b723-f24130d3bf5c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c4b036bb-f145-4977-87f1-ecb1c17a2d37": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1d5275b2-71a0-46cb-94e6-97dee041d00e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6c1f3995-67f2-451c-8839-a55791bebc8f": {"doc_hash": "6fd193b2d01323b91aac1c40b7630373797606e7ca6008e61c010fa961917cfd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "65faa0e3-e146-4ce5-88b5-cf22f879afe9": {"doc_hash": "bce2e645e03d965432c766e6c7261a642b55c0534ab071e05e2a7b8951f4e204", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b0412f5e-93aa-4f67-95a8-d482d39cbfd6": {"doc_hash": "3fd163c04a4864d780be9604459ed258357b19a5b647d13a7044655404035f97", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9cc1d81c-4140-4313-9787-81ce77bf8ac1": {"doc_hash": "4d434794f0ce58e5da699372d01c5f9a4850aceaab084c2211873bc37baa7fc8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "36363011-1120-43db-b3e3-fc0d1810b739": {"doc_hash": "15e56e8105b354bb18590d6dabca5508f0e36f5bc5d1f7878d6e97204003ac6d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6d196081-f067-4f9f-83a7-7fd7e918cc47": {"doc_hash": "ba536574f882ed3425d4f8a61c5d4a6457d9071becf1d78b3e6282fcc7751123", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a550f6e2-6e70-4320-acfb-d4878568553c": {"doc_hash": "36a7ebdc3ab80b34221799a403f2e92fd7f2c3cb0546e3ef0e891a8e868f6d48", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9f0519f2-5e8d-4c0b-a0df-eef1ed9929ce": {"doc_hash": "c4ff8a7f154426b8a19ee2de35e66d24e31a0a208408cf435e167396b3c234bc", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "47c57a76-4bc3-4dad-b25d-ed7523b85304": {"doc_hash": "e789ba3971609748315d90ca2042b4c999755e507c7e5f66036cc7a210fa8858", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e7357c55-81dc-4084-8c5d-8093fa6868b7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6919eb0f-63de-4090-b6b8-129017b2ce6c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e08d802d-4c3e-4599-9d52-f193741f319d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bde27828-e6be-4770-ae2b-58763be28ddb": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"ae6fbb47-6cb4-4d15-9cc0-51d5fa8a3cdc": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "014641be-e7ab-42a3-92f2-0f1bd7038785": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ffce041c-2665-4d39-9521-d0544e331bb4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5473394e-23a3-469a-b334-862600ccc619": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "25c0446d-81ca-456b-80f1-4bce41d1b828": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f7d3365d-df08-4936-8522-e28c0ea54b5e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d4caf388-9558-42d4-be52-07473b5c5666": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "352da421-fb3a-4ae8-ab77-4b13552a425a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "84314055-0f03-4138-ac94-b0eeaa8d2413": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c20952de-7755-4ca1-b614-3f248af3bb85": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "39e50d86-4c32-4a52-86ed-4ed50a98693f": {"doc_hash": "88bac462f9117548da25ef38178ab0a0d06340c3b57eb1f1d3d9fcd34dcf85e6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cff100f8-481a-4770-b9be-06c363f9e15d": {"doc_hash": "bc3e9509b265e23e3ddf78792600fdc707dc7b271b07125df7c2c79f2b2155a2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "085a6f53-a455-4d21-ab9a-eec4eaf4a413": {"doc_hash": "957fb38c477a0a0b8e6f38bc3bf38b1654aa7d71d26eca315b16d939a6b60afa", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "254381f1-7218-48d4-b950-c02677074873": {"doc_hash": "4d5264f3b60cf3bde8282ccf142de33ee5acaca2fc8d6d0321d17e81a5d9508c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f277422e-3e8f-4cfd-854e-23d65bb7ac1f": {"doc_hash": "c2b4ad2f5122d7cbd201b553a2a600bca9d2fdc497d3700975be7882257ef4a4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "700efc3f-8618-4459-8280-c2f12a0baeb3": {"doc_hash": "b9819d8fb34c17333ba52147b332a07eb01437742975fca062270c4d9a15d650", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "54beb635-5444-411e-a512-2fac95901cdd": {"doc_hash": "54664c9c4eba3a45565afb4423e35c955925bb8057ba8ffa454d5ddac9b18312", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "61ce1819-7253-4549-856e-eba686e56983": {"doc_hash": "ce1331beac761fafc4d3fab301805eb35c11fe0778aa6101983faca4311c97fa", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c5d5e7fa-16be-4165-a8fc-d0d72b36a255": {"doc_hash": "41eef5ba78abe189a7e6f5e32c2629cc439a74691b43383761a9bd2a6ecc3b04", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "40583b0d-9815-4e55-8c22-e088caf3ae16": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"86445103-3997-42ca-bbc1-1cb2f6df273e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e5d9e8f6-896f-4ac6-b283-957d8d25f1a7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "612dc8be-abd2-4f34-bba9-f37c461f9ebb": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d3b81710-74f6-40e8-b4c7-b85558a4cf5c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "45dbe881-b7d9-432c-8eeb-97c457995be4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "95569fe4-6b1a-4b8f-8645-6a1518ebf147": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "81da4f34-e765-41ea-aff5-9bf225435cfe": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "455116ac-4f68-4141-89b5-2eb940fccc32": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "41f650f4-924c-4cf9-981e-77834d0df780": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9546716f-ed71-4f4c-9a1e-235940d6c249": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "57f7a476-d8f8-468f-97f7-c9f6cce97cb9": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0dae5e39-ab6e-4e64-b754-288903aa7a4c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b2a13496-06f2-4a06-b9fa-ed9391a16b12": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "66ac48b3-0183-4cf8-a9af-0864cb6b82f0": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a52eb297-77a0-4ef1-b3fb-f93ebac7e60d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7fdc2a59-9008-44f7-991c-3f8b53b26a96": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "364d956b-896a-45c7-8286-bdbfafcd59d2": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c3ca74d7-b4c5-4a14-9040-c12dd722777e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a7f369c1-654c-4aac-b1f7-80cabff1be14": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "690662df-c4ee-4adb-82a3-ec607bf3a4e4": {"doc_hash": "eafcde22cbf0b2e6552e8bbded61e87bd84c66c0b12823b64cecadb8b9f40713", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"08f4e80d-38a6-4eb3-92a5-f3e1f31a7bdf": {"doc_hash": "6d7c53c886eafec343f727d915eb4513146e80bec49f27f58f9919ba8fde74bf", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "182c9022-0a66-4a54-a0b1-896409b156f1": {"doc_hash": "5526f70abce0ab510c32ec0a0718163dd086bc01fc463e32d94371504e6847f0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e3e51562-d148-440a-b717-70541ab2a372": {"doc_hash": "ac2efb2a863903af7110505df58116f213136c5eedb8d2529a10ba3cf46f6735", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d063a6f3-7844-4591-80af-72014570c8ff": {"doc_hash": "c8df9e2011406e407db788a55fcc6cda3abd0edc938d5a7da0087492507c21bb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7fb0839c-bd58-41ba-862b-a88239c304b8": {"doc_hash": "74f6b87e3c26ecc362ff6c8b5fde56d84ff716b24816044a70c5e6a5cba883ca", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e7405003-dec5-48d0-923c-b11dddcb8c17": {"doc_hash": "d1fcd6d1c72e9a225e50fbf2ecba3db7a1986b4918e1f50209552d7c243d76a8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "62e4511f-14dd-4909-8f85-ce7e46749849": {"doc_hash": "d1a8b270d1b201aa16b57f856ce37d65e5dd3b95a4c3feba35e7d30df2f1daba", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "aec8b991-d807-4438-801a-4a1eab70ceb4": {"doc_hash": "601b2dba29fa3ceccb02f7c706e7c9ce8e243f01df9c58e1599d52e0aff12179", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "faa6f58d-1705-4523-95d6-f9cd2d181d5a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "186ae934-b2fd-46ec-9f8c-bf30af97f134": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "096db7f6-33fa-4ee9-8ffa-90eb58bb3293": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6176aafa-9309-49f9-93df-634e6f34ab8f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "07285404-98ce-435b-9069-053dea5eecb0": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "700b820d-7a5e-48e1-808a-0ab0aac3e1e6": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cf7ca23f-6838-4ff1-abab-dd443b84da37": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "51cc3f3b-2a9d-4fbc-abd7-8ed44ff32c77": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "03c5c698-303d-4a5d-ab8e-323dcd3829d2": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f88f8fa4-f946-4e76-a3dc-fe22584eb647": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a3f13bc9-118d-4a26-b51b-26386fde6fa6": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6abf387b-a0d9-4a24-91b0-5036cc14c805": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"6495d3fc-101d-4694-905b-5575e3fbb2b7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c4fb9d84-e7a6-4466-b211-3280cc9cc0df": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "42895f62-3e8d-4395-9264-1f3786934b7a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "23b9910e-3887-440a-b3ba-9c05b5f1d71b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1062cc1e-28ed-4b37-8465-960e9c7316d7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "af96e777-8e6c-43a6-8a44-9c9fa7e3fb4c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3b787eaf-97de-457c-a1f3-166f640a7416": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "432ca6c3-1af0-4b51-bc4a-79781cce183a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3488e2e5-1638-4be5-b066-5b50629fd121": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "df97ac8c-1bd5-48af-82d9-0b2acea45d95": {"doc_hash": "66853bac961236c6ad32e31a119f89d5ba0ac38ebf283c294be13178e8195ce4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e213a7d8-d437-440e-ad01-60172f77d1ff": {"doc_hash": "8d441cfe16d341bc09df075a2d006841eee43a85408ce3ae3b1338653c49f2fd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d89b7b29-9b71-4a15-a004-6e5ab6242af3": {"doc_hash": "3349bbdc8e7f15a75f003eeab445127ccded08363931fe81f339ac28e3292932", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "42616817-c368-418b-b4c3-f67435396f9b": {"doc_hash": "afda123cb10b02426a0114d35d8ad7413206eeccda573884ec1300f88177e6af", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cec812b8-07bd-474c-a35c-e2a17a85970d": {"doc_hash": "3687c7d305423115b01bd80a509365142150f3def0744d0cf284180e874826a2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4588df1e-9e69-490c-b75e-c136b3d73229": {"doc_hash": "8fb8eac9b8b59cc7c6a28e6e40157602fec6743f93256ddd903a92f8f484b3da", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "219cd3af-167d-405e-adf2-86ec7dace7ac": {"doc_hash": "13af3b242db8a50aeb02d8498508cf97182bc39462a93b3655b56851e7aeb66e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "35e9a18d-e69a-4e00-a2a5-658b1afa2e07": {"doc_hash": "83a1d764200d7621bd74915435eb7b4b370113d8713c451cab391891a08c49d1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cc6683c9-d7c2-43ff-a762-e345d7ab280f": {"doc_hash": "4358c106d6cb63308ff5ada0da3ec316d30dc4577083d1f79893f8409cd88e01", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "14f92f7b-213b-4925-a0c1-750522b2d0cf": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ca9658ae-d6d5-4b79-8baa-445d8c194af2": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"97afdf3a-8f6b-4bba-9def-2e12251908bf": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "989d3fd2-3a32-4df3-863a-aff0d9e6d5c4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "69af4999-2384-4e5b-a2ed-f8a50723499e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "59d365ed-06ab-4c1a-96d6-887862ad2c62": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3aff52a9-0551-48c7-a643-57cbd40168b0": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "93ac8471-95ef-49c9-8d81-d35fac819a2b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b8e0698a-9c55-475b-af11-efd14e30a941": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "880d5c0d-a4a2-45ff-b26a-e9d19d12a055": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "794d6a0c-089a-4a9f-8170-29c20b3ec300": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ecfdef26-b97e-4ad0-bcbc-f8ee5d4a4970": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "811e812b-53e4-48c5-85f5-ad9c130b1f5b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "757b2a07-cb08-43e4-b855-915e793cbc7d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1f81c1a7-c32d-49b4-be38-48559bae2c10": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ab2cdb79-4e87-4bb0-84b2-3f6af4bf1959": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3b6630a7-b7cf-4ac8-b0a0-aefe1c4838e1": {"doc_hash": "ec2be32330a7fa69a76635d58c2a806742a6bc82bbb60828586960027dbfa6b6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fe632d2e-75ac-4dcf-b9ce-c2112e0d96b1": {"doc_hash": "19937be2f0a5784209586333ffe52ffe7e36b4431e9d2cecd98e4113773e4a07", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "00404da5-017b-45e1-9395-ce4bebd3c9e8": {"doc_hash": "4e935d052048f31fb746a91439449eff4fc858dc7c35733e7716e1078cef18c2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a25272fc-ad0b-4ee8-98f4-d95344e83cca": {"doc_hash": "e02c3c1832255ddd9cfd3a2ba3d512d29d09a17fe0450dcab4e1c28f7443d403", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fd4e9c62-9120-4464-8fb6-bcf368f5e8d0": {"doc_hash": "6ae7b4ee3b818d02ccb05a16910fc558610d1706673747da738030856f80662d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "955c63f1-3690-4d8e-85a1-b2be77dc98ce": {"doc_hash": "fbfd95bc96f9b054b73e529defdd875e51377f1a313e0422213583de160f6efb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"babc6b05-6b92-4cec-aa10-fd20dc009ec9": {"doc_hash": "2a3569ac91e29f7b6c1ddc5cbd96168ff2615d91e126f879626fd1e5d0781614", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "53cba9d9-7973-4bc8-9080-f2510ef7bc36": {"doc_hash": "7a52a5d8e659f5759ab0a898f8422a05a7ea48a63ea158b7a11288f6995e2135", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c9c3e7ff-2e2c-4db4-bfeb-cd7a787119ec": {"doc_hash": "be91fb1c561b394e986b4d0e4c99f5747e6f2e98957d79e868b8dbccde6a811c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9e0bbc2d-6193-48cd-a07c-80474253322a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3f194125-734d-46f4-a9fd-ddd29f5e2183": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "be1ce379-c349-40ad-afdc-71a7936e60c6": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c3296cd0-6365-4c5b-a6d0-411e8c3d0577": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "58aae4e0-a962-4b38-9a6e-a687ec00f30a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "33fb6106-4d90-4c8e-a437-8a357e114dbd": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "616c86b6-7abc-4204-aaba-a811e22464f7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "29cd9cd4-b945-4b31-a61b-a81974170bbf": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2aa81b72-9837-4a28-89c4-7263550df613": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4d89b50a-b72f-4cb0-87d8-8cf7b505ac9e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4fba19b2-0cce-4256-9f0c-99191571e2d1": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0634617f-9e21-4686-959c-80dd984f5d11": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ff12285a-3231-40f9-99be-c91d13278f97": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "29c922cc-48a1-416a-966f-a3ffd0edeee5": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "35072137-7803-4223-9b54-60db59633a2f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "49faee0a-80fe-4fc7-a2be-8e51ff8d268c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2f5a8a63-1257-4665-9df1-b62bf40f4cd4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"d55b5eff-093b-4db5-87ac-0becb7be3b3f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c50c2988-5b65-4178-a57c-59a678975f91": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5986e111-543e-41f6-b606-acd521d4d469": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "95604239-1db6-4289-8b4b-157830379b87": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "67666908-fe9d-4d3e-a628-b012f49263ad": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a0efd862-3924-4f9e-bb6e-1fa824ad1fe9": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "351bfba7-c40f-4b24-936c-695c8922400b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7cf7d9a4-6f59-47a5-9e0b-8101ec6ea48a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "912e3efe-ce86-4e91-9af8-3699b294517a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d9c1f9ec-ab20-4a1d-b35b-312c5299601f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e59e4445-a4bb-4179-bb7a-9137b7d7f70d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "942783dd-0d25-47fb-82fe-d40113812f52": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7c0d2760-f3e6-4bf8-9e60-0c29d165c69a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bcbea5b5-fef2-40cf-acb3-84e513365e95": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ab830d4e-5aee-40e7-9563-705aa609ee32": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4f9e84de-6297-417d-91fa-dd284764cb8c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "13557eac-bdb8-4e98-b6c9-6e1502fd8977": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2d4a2448-bd3f-47ef-8120-61f108a3931d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "515f35bc-6805-4539-8dd6-c0bc8442c7df": {"doc_hash": "93ab575239f30a383746dc6d679a5262965359773b53d0981d3049168a7a480e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7d3b68a9-9684-45bd-b513-5355cc342493": {"doc_hash": "007b9ce5a595f827bbc91c8dd39d487e519a06f527aeb45083d70cb645c754b7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"0df8094a-0fc9-46d2-bde9-db17da7e0798": {"doc_hash": "968c63e03e2deb4aeeeaa8431afc6d87489e4ec8816a3e7149c3703eb629fb27", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d200200a-c5dc-4197-a791-607008c88d14": {"doc_hash": "e48470f2fd6657e8a90eb30460b9bd267f9ff6d1ac0dc7d357e84dde65e87f20", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ffd57124-4e66-446b-930a-d1498b742362": {"doc_hash": "f9dd41e2e99f660916828d673c51d860279cdd3fb883d6c75890cc6660a9dcdb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "69f8add1-0f66-4bd8-9706-b9f8bac2b648": {"doc_hash": "ca1affcabe12bf8ee16121e8c5e3e5f3c875e641e754023a97d277b531ba1895", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "66d9162f-b0df-4d11-b4e6-232647daa677": {"doc_hash": "28e1d705964971f413654c074d001401e4be584259c495e2c4c548e54d87f570", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "155ce6f9-70b8-4b9d-b137-36b9951b8036": {"doc_hash": "dbef2fe6910adeda589d03ce71ee6add5f37e1503324de6e3c4d928e2fd19b26", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b576c41e-e76f-46ca-b572-35158a29b8bf": {"doc_hash": "8c3e773a86884a93a8eb2ed8ba1fb8319cc3231068aec1487c6d0d5363dcf26b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d6a25275-5b62-492f-85af-622d4d555dd2": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2a75dea0-7dbe-4161-ae51-b5fe341a666b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5aa5aaee-7a64-49a8-a060-ede2373823a3": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ee0bec49-a86a-4348-888b-79d6fce5b11c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e4740a38-96fa-47ae-a521-176c04c48be9": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5cdf0da4-1529-4a44-b8f2-c5d9ebf5f0fc": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "869f1ddf-9fc3-4f1f-aa3e-9b811531a093": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d2ea7cc0-8426-4708-af74-94d178188962": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1e745c01-a8ca-49ee-a307-aca3a88eeb62": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9055b746-7c49-483c-ac7d-1cbeedae37b7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e3b851b9-8e1d-49e8-8bb8-9eaace3b36a3": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7006d86c-2845-4db9-8817-df0a7e5bb69f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f91eafaa-5a9f-466a-bc47-5f0ac4a3eb45": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"2036e1f9-6e05-4fda-a8c8-d447badce37f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3ae182f1-c658-4a98-8cb9-ad409c89333a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8fead42c-fede-4032-a21c-e015976afe68": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c9a603ef-68e8-4ae4-8b90-52c5af768453": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ae816606-c3a5-4306-831e-946db4952968": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7d4cd77c-5acd-4d9d-a5c0-095110f05de4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4b5f0d66-e1d2-4ed6-a7f6-76c7baf52b09": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ae428e09-e434-4705-9982-207ed1ad74e6": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6e221fb0-a26a-4695-a84a-823b7f3e46e2": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d4fec36c-0dab-415b-97dc-9648b7597a47": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "891dd5e8-bcb7-440d-9f34-c50f05fe5b8e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cdd9e12e-4f6b-4580-96e6-076a5400dd7c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2f6fe182-a7b5-42ab-8590-08f808cce910": {"doc_hash": "e497a4a5f9380bb1cad8892663da6d9ca308b8ed4aae07d24348fda6b585b98d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ce21bb77-3b5c-48a3-a0e7-bf3e91b62861": {"doc_hash": "fd4df70e492eb72615caaca3c48191ea905fd04bd585d0fa078970a8d7598422", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7be05165-dc5c-495c-bdad-233b71645d77": {"doc_hash": "15e3b8f010e513447a39a56452424ba9ddf2bb6b7fffa1248d4a561a24fd5108", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "415501cc-5c85-41d4-b498-139b1e5b59ea": {"doc_hash": "d52d4282378dfdf2d74aa9753ff736cf871f1f76302f9beb88ef28051d11ad6b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dc0f2644-9886-4bd3-b613-4bf23c540d67": {"doc_hash": "5b7b131c8662ebb83f0508e04f5a6ff14f1f667d94d7b1743365bc25cdb22f23", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1ec46459-5948-429c-90a7-bba74d930e53": {"doc_hash": "eaac8f050606db56ed7ec9d21d143d322570da530d1829ff2d112250168d2b6e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "839a4b9c-5a25-4c58-9b36-1cda8b399b23": {"doc_hash": "e15cfa0eecb7adf73cffc69e7c6c5f6959066b98554588ffe4df89a76367919e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "05eb5f60-62d6-4053-ad8e-a59f7feccfa5": {"doc_hash": "8e92698c8cbf7b5b61673affccf56644ea80964c52ec7873f46a2542d3fc2975", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"a6439e08-9f9f-4a55-9322-a745e30da0e0": {"doc_hash": "505a3753458d8f1f7f52faf8580b4d300585a22277f19358e8471e819aaba168", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dce9210d-fb37-4476-b0b2-78c6f20831bc": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fedc3f76-f24c-488a-8d16-f1d72ab3c4fb": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "255f44fe-4ddb-42d6-af44-4ee3e484b8e2": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "110eeee4-be40-488e-9482-bd4d6d425399": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dfb31c8e-9ed6-4981-b84b-e61e33472f5d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4967230d-0a80-49a7-b7be-0dce5a9df235": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2bb79a49-347f-46fa-97bd-a7665ba268a4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3614c960-f31a-41c0-822a-58b78a90fa8b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "96f5db1d-6112-4a0f-8d59-bf99cea35d8a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3256e618-3a73-483f-9a7f-42dd04087711": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4b8852e3-782a-45cf-b088-58a1b924c43f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6d367467-67f1-4367-9aeb-564f16645f66": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "29bd39e9-f988-4835-b452-655d140b8d61": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4a03c3f0-95a6-4e06-856f-21c6a5ff98a7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4c2dae3b-4994-488a-a8c4-dfc64b97ff0c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5fdd60bb-42b4-4a39-a711-8beccccf373f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "979e6a6b-efea-41ca-a7c9-9668f04fbcdd": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a90ad3d1-bc04-4489-a049-5c713d52d173": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c3b62386-aba7-4f94-9000-7f1a28d2ebab": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"e5ea3efe-cc55-4008-a4bf-bd0dbb048e94": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f1af395a-2de9-4ead-8640-5d061341b348": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1cab2a1b-a166-4690-b494-ec86a00e79c8": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c0e635e7-5e5a-4090-9806-04f1987465a4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f00071e5-7b83-466a-8454-e7d253f262dc": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9d1ab84e-9be0-459c-8305-c9353a13e749": {"doc_hash": "88c5837f5b6c481cfb17c85696c1cce11ffa5f33031a3a3f2ffc20fb120e36e0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "403511af-cbfe-4c0f-948d-6a9ecf7ae6ec": {"doc_hash": "4e6832dbb03387a9a5c5012863318cc6ec1afba3303717dc672bb3eb4223836c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9aca5a0d-bfa0-4caa-887c-829d210fc9e4": {"doc_hash": "a2db21686a66e4ffa5784f0be861b8479d3ed7e10da68f353934a22a18b6917c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a093a811-9296-40fd-ae21-bb64d1c3926a": {"doc_hash": "800ed23bcc6b441a1d9f68c88a7296142f36a3c18b036212ad0bf94f5ac7de33", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3da0aa0a-6c63-496b-8464-917ae9813f4d": {"doc_hash": "b0a7d9900843dad2305e5b95260da53fbb1fac93158bb538bda0ef58da4cfee4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3ae4bc93-ad47-47fe-9487-34b3926c9cc1": {"doc_hash": "d8387aab8c9910b78423d6d5409ae095327b112175383f8dc8366b967ac8d305", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b91fd9b6-be6b-403e-a1ae-e2ff2b383e0a": {"doc_hash": "440bd65fe62a37d8cd2ad836e7e1555e1537327413db1b4e204819d304bc5474", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "53f66baa-e165-4c00-9798-78b2af372b1f": {"doc_hash": "15a269acfd6578ba723433f4b3fbe1ac4bb70aba8f28e139d054ff4bfb572a11", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "af635190-17ea-4bea-9a09-53fc3fa5cde0": {"doc_hash": "957e64c2665a501e46f4ce0bedc0ad9e17050929f444dba7eb864aac55ab6d2d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "21c4379f-21a1-49d0-91b0-d3bba46ea2f1": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "81137b75-4255-4120-bb72-1ae7efa1325b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9c157a5b-ebcb-4af1-b248-ec80a7ce3ba4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d46f79da-9279-46dc-9a5f-f5a3deec2845": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "730acaec-9cd5-438d-83a3-906f6fac6c1a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1f260bbc-837b-446f-906d-401583250bb8": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"6c0c9484-6d53-456e-a2c2-36be82b612f2": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f88cf895-0e45-4513-97d7-bf3006ec340a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b93cc96d-a274-4302-9907-e1a8740a8550": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "496a8931-174b-4ec1-b4d0-8240115aa89c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7781f9a9-0ad8-430d-8953-c9a8dcd59017": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "86db7322-ebb3-4d85-936c-bf9e304bd9ea": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6e08f6c5-1c1a-49b5-93db-fa0d798ff82c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c8393f43-baa5-4c12-b51f-37ba84411a70": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bab49fba-279d-4ddf-99d1-6706521a71a5": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "77280196-f248-4806-b532-4e77f0d5125b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "946226d0-b703-4cd6-a543-3ec9a1c0454d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "81a99583-c412-4cbf-b317-4575c17cff32": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f1dfcc71-ce5a-485d-95e1-c39f11ad469a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "09fb64f1-efb2-446a-9970-76715defe156": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "94893e9c-a839-4894-a278-be34a33dbe47": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "54d5a007-6cca-46c6-a9d6-1c883f484a9f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "25c44ae7-fdf8-4ce7-9e9b-e1a8dc7ee78d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6cbc6819-3e97-4906-8796-87e467f514b9": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "092ce679-7ef7-441d-aeb5-ce1821c9d7b1": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9edf7264-654d-4cee-9cda-9c5181aea82e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"9e0ad855-0f6f-4517-ba5a-6601e91c61c5": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a3b6b725-aff0-4f13-aef5-d15c79f568ba": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5129a96e-3298-46f8-af07-6aa5354359b7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e20345d0-f37a-47b3-a84d-5870f8ed09eb": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4b889ce3-2f21-4aef-9699-6d9017f20a4a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ac5edc31-3f0a-4263-b238-52ba25d2428f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bade3cdc-cbad-496e-92bb-6aabafd99284": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0b30648f-be34-4884-8ea0-279f164999cb": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e54e6309-4eea-4571-ac63-383e7b7d0e6e": {"doc_hash": "bdb506645c0a9b0dc1599018641dd836d5d3071d2742c0347108f70b587c3059", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9d380c2f-0971-4951-a89b-a02488b1c89a": {"doc_hash": "f582e3d1f617ebe3050f40746ea5e0d4d12b563deaf385e7f36e441cf1b2f10a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "71fb7290-95e8-44d9-a3af-772e78899ad9": {"doc_hash": "225e4bc7415326b5bb154eae7892a497656c730c7e5d971c812180d4b1ca5b8d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7e25fb1b-ec22-43e9-8955-2848f47cc53a": {"doc_hash": "6f61a32f4b3e1d36cd0fa90370bf3e45d684dedb2720640730069fb50475573d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5ee6c5a4-35f2-4936-9314-9f197f4ba2d3": {"doc_hash": "7d055530f0d17486ad2647083ac2d639be752dab64aa5a0afbe4fbf66481e129", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fd845739-d1a4-4871-88ae-b7d5cdd0764c": {"doc_hash": "257002bdaea2bca3a3521e658f0a3adf80ba8ca5045abaaac82cd18960bd5319", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "da92202f-20c9-4ce3-862f-27adfeccfafc": {"doc_hash": "59f83e545b8ed45da3ac76e1daa72cd5a61682cacb2b7e188602d9b135d8364b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "07c81a94-1fcc-4c02-a931-6deffb9413b1": {"doc_hash": "e9413569db1221627f44fcdfe117c4d267ad5901dc07185a54f3b6bd57c132ee", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "28e282d4-d383-4ef2-9734-e105eab0a626": {"doc_hash": "ad4c323eacf2a5f90ba3da6e58e1a84f07550f84e3702f3c2989b948151b7ffd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "355812e1-e2a6-4667-aa2e-98acdd554b75": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ecc06446-ed65-4244-ae9d-7d15416170c4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "eb4bbb3a-37db-413d-b122-5ff0f92dd0f2": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"3ceebb5c-7a48-4865-ae1f-4aca7ffe04b0": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "092dab60-215c-4a6b-a318-baa4a71cb6e6": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8b56582f-8d09-4ab2-bcac-a56d4e598d0e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "668b0c4b-a595-4f8c-a94b-7cb830568d45": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d9852ee0-ed1f-4fe0-be4e-226bfe2780b8": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "149e944c-9220-457e-8aad-8d978e045fcb": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3129721a-98df-4fed-8867-8a80ed5acdcb": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bab2dbe0-29f6-4f3f-952c-82fd967d2183": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5aa5a4c6-3c47-42ca-bbc5-9834a733c697": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a1063a3a-0623-4ffb-9966-3a2e837d464d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7da6c025-4ad1-4704-8ffd-47687a86d70c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5d999c37-1884-43d5-98e8-af8f1cd7dc7e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "64fa903e-9660-4635-a981-d294f06a84d5": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d4d84da8-a760-4268-9aef-a65ec802e334": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b07c3f13-1d2b-4d31-ae22-c49fe559beea": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f7ce6b27-91aa-452e-996f-f57452dc85ab": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "17871de6-4898-4e90-91ed-8033633283e8": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b349a5a0-326e-4005-b01c-255633ac3afc": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2930af96-d7f1-40fc-b07e-559ed43d60df": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6d86b3b8-ac8f-492e-8472-3fc387043a69": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"13b3685c-15b4-4115-b0c9-f3dcbb9508f7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fe681619-8376-432a-b85e-338bca89aad7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "22aed3c5-de6d-4318-8f0b-ccc5ca4b0dd6": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bc9eed1e-c16f-40c2-8a18-4e4725dac437": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d31bd6a6-a254-4ae3-9f8f-5073d707bdcd": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cf8aa5df-3520-47fd-b204-afb421cfc025": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "14a35976-41ea-4c11-a2a4-43db46bf3f6a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2d0ed9d0-6464-4d90-8711-abe0602cd051": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "694a7c54-f39c-4995-9140-01b087727fea": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "18dcadee-14c1-4814-bd7b-c294c45c679f": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a44f06a5-9aff-4e26-8d2b-99ce774131ca": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5d7573c8-6fde-4e3a-b49e-73d3cf1c3ee8": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "45cac001-01a0-4da7-8f3d-60263e6f879b": {"doc_hash": "9e83caf19079d7c9b4eaad193fc10307ceeb2d41fc6e1f000d1a6a3e0a43e2de", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "26da1a0d-8331-459f-91ff-1949035b46f8": {"doc_hash": "820ef9d34c6170070a883a543b3aee6733dbcacadaa6274c3bf4dd90957acbed", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f14bc4b9-19b9-4a56-8b64-1d6bb977a0bb": {"doc_hash": "9d729e267043de586aecbb5c56c3de61f8f395c3768271be2df4874275d7aca2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "eb17ffe5-d1f6-4f6b-b55d-cdc2aa66bff7": {"doc_hash": "4d2f29d6d65f3f211e8fae159387cc9d8e623089aab62124017b56a4623186b0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "73afef95-0f70-4310-a611-ed3a2059ce0b": {"doc_hash": "472098ca692e3feb03319da8c8abdc1d1fd92e3d711284ade6552dfbe20e29b5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1323a387-36d6-438f-b7d0-dfe2a2deed7d": {"doc_hash": "f6f4436c431f063b7eb06a72bc8a0200c909c2c1a555994efb5bf1beb360240d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1f54059a-c723-4c42-96c3-2524ce7e09d0": {"doc_hash": "3ab99f2afa3d9328d579e2ccb952fca33c77c721fd11d83455d863dcaf88dd42", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a1a1c3af-82f5-41bb-b6cd-d37020208ba1": {"doc_hash": "c6df5c0e536823d56c90bb65fd13b4a1f9477f394762efc085b7c2584347c407", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"738e008e-920d-4eda-8be5-51b0e3023658": {"doc_hash": "76c2aa6dd0055fb1a973d716de8d643396c031bb13fc84086d737e9c25597745", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a518875e-e461-4b06-87cf-29b517f4324c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e2edc79f-58f2-4eb9-b2ce-a68cd8131bdd": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "240e6559-3551-480f-845f-c231d7e880c3": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "71d65038-5759-4e21-a35a-5812d4c33515": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ea9c5dfd-0ea6-415e-9b7c-9dcab087c833": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b7a40572-a6c4-4da5-9588-f6084f3300a1": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a8b55d47-4aa2-4b87-a158-ee5cb59645d0": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "705a606f-907e-4ef8-98d2-6c2e0131b8af": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "faabdd7c-f5ec-4d94-ace5-edaef19fdb6c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0889bdef-1ead-4d33-9a3b-1e6c833481ec": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8c20904a-ed17-475d-a8bb-2c30ba333adc": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "498efbbd-a7a3-4878-b748-fd8a566347f4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5045030b-9dbc-43f0-9e82-2cb8315425d9": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "92a4d7fc-1742-4762-b5cd-306554f29e1b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1f3f0c93-7c01-40f0-a8e3-4837856ebdcd": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e02b1a27-c64a-4020-8a40-e3f42e544108": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "376e6950-0ba6-4126-90f6-0efeea3846b7": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1a21dd03-afd3-478f-9fed-ec84bea64f17": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8bcfe243-3553-417f-ad9c-db3ff9c79752": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"40ea12f3-392a-4db0-9196-31f2e75ac941": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "611c51f7-fdf7-4315-95da-5695bd1832c0": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "95c07636-ecaf-4d9c-8814-7665fd813129": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "91038a72-7d16-4de9-a5cd-c188a75ac4c1": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2c5c9baa-8321-46ed-8347-753933ecc87e": {"doc_hash": "ed4f30f758570bf0cf3174da054631cef3cf48772b574fe8d0eedfc553e4ba93", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6d40ad33-f7aa-43a8-a87c-360b9fb55fbb": {"doc_hash": "422d05881417848eef727f4e6a7965afe9956d271b479388507eb890d9e68007", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "74773f3d-aebc-40f7-9188-3b1aae82615e": {"doc_hash": "96c18c377cf0e980ef5605426fc7fa524443e072dd23bb8288c77c41b0f59267", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e6f4ab6f-6382-4a68-9385-24c615625fa7": {"doc_hash": "6e0717bbbad415bd35daef0217ccda05fbf7b8cbe645e91fa1147a66c6d7b044", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ef709998-c023-4d7e-9fbb-64bbc42a62b5": {"doc_hash": "d9f92286fb621058b95a4958ccfba1e7c711315ba2e618ed495d07d9205d9b15", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "42547948-7501-491a-9364-bbb7a01632d7": {"doc_hash": "abfcc1d4d6a1755ee2ee631ea28f943f760f3223abd8dd7f8b5db853d4e82696", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c0a9226f-82c2-457b-bb4d-7dbdc124e13a": {"doc_hash": "ce5260a19dedf8771282968f8680482a445371b78746ac6307a9618c2be4fc5d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "51596bd6-9f3b-4986-ae08-0f2ba67eb203": {"doc_hash": "f58dc24c2632684e912df904bf1d8a143192c627e444b639b47fb54670ec37d1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2e5d58b5-ca35-485b-969f-f73f725495ac": {"doc_hash": "094d15d9d4323c058dcfed0a3fc1c8e0ff9301b3e46119f29c5fe3b018888c24", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9c7fb20d-7ece-4f12-8497-7c61878d52e6": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ce8eafae-149d-4fed-a046-d986dd294b08": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "293bc8d5-1d1d-45ad-8b36-ad037f8a5dc1": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "aa44127a-443c-4bb2-aa8d-9456a87183f8": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dff9eb92-be49-4a38-a041-75be8d101e4a": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7972a5ce-1d08-422b-83d3-3defc34635c4": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5cd26bac-e963-44d8-b266-0ecd93c8f5f9": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"85726e01-c8bb-4b72-8133-f4322bb100b9": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "92a0cb7e-7781-4f99-a9da-6e7d7234683c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2fe10dfc-4edc-445e-8275-a0d4fd0b3e5b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "07fcf5b7-3a1b-43e7-9372-ff3fd25691d5": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4a35bf98-122e-4660-b913-7811a812da0b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b3545d8a-1238-41bd-85c5-2b52f6a2638d": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "37e1c58c-75b5-41e5-9d8f-25053e7a7836": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0ac5ead1-0b00-49f2-9863-50354bdcd934": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "49603d20-a3ec-4981-b1e5-fcd5466fd31b": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4c02a0d0-9eb2-419d-820e-1258c99ae8fb": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "61344edf-cb22-489d-8d2a-c4d71df6be50": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c7d58f76-0d5a-41f3-b0c7-dae162d95354": {"doc_hash": "1ab29086e98edf25696ef8fe0900e98f490c47e547b151fe3e59732a92f7b9c9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c71e28b5-35bc-4174-99c1-b62a8933aab0": {"doc_hash": "6dbc810bf9669ac35870478e6ce5b3f80edd2cdbac720adbb37921e13ffae287", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "eaef2d03-3d2e-4eb7-91e6-2a44756008fb": {"doc_hash": "aeecf3c4886b7730db7b3f6a2f80c21f854c551603243528b89cd09a0c628f68", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b85294bb-faf3-4ed9-bd44-14ecf3da9d3c": {"doc_hash": "c16b8fc5c9deb6455f34bddba0ef656e70408a5ab0f15d10cc407603c2405980", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2c69b642-ea21-4cf8-8d2a-39d6a7bbc361": {"doc_hash": "a160490c2051972d5c9267b0a245eaa3362d56ef89bf39f482acfba50567ddf9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b5b12580-b1f6-44a9-a84b-8c0cb919481a": {"doc_hash": "e4c5edebaff10a32c19143ad17a9ae07a20899d82c689053a4ca66f7e9b675d4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "32777b9b-e11c-46be-a408-4d6931b3ec90": {"doc_hash": "a7ef97388b9528119ff780a8989b32475fd8cef6b4a051414bf62c98c0976ea9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b16982ba-8d3b-44ab-abf9-80e7bfa2336f": {"doc_hash": "baf0e2cbc3e38c26b35fde6b88fdb832c1344ca2ed1c9cb2151f4df0d4684cf4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1deec3be-1a85-413b-8f19-5855ace959f1": {"doc_hash": "bf43ebb6e53069d70db763eac3cc6f4b6af727b1b7b225d27d0c996c052cd2d2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"c3b8f589-e6a9-4d30-9336-5dc750073020": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1c7917b6-836b-45e8-b13e-bb0b75865073": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "95c4701b-a448-427c-8b5c-fa0ef38e5aa1": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5dc6a3c7-c6bf-4a08-ac84-36df7ffe83d1": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1acacba0-04b5-42dd-8988-0f8922a683e9": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "920c5b5c-04fe-45b8-a89e-729379fbf001": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1096dc7c-2770-4004-8e7b-62263bad0667": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5b8c28f1-7f40-4db7-88f3-afee7af17880": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d9b99f43-2498-483f-b04e-bed888bd8796": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6c03beec-36cf-4c0b-b997-7e4b6bab09cf": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b22cfe5a-b9a3-4e71-a552-7fb268b8d4c5": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e7cb19a8-1fcd-4795-87f6-cb465efeb9d6": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2932113b-b43e-45f7-860c-fd6c431c4a07": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9a05bf37-1625-4711-be55-81a1380b787e": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "85d2d350-ef7c-47f0-9b93-c6d8cca2594c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c8ed8d72-8a04-41ed-9fa4-81c5e773db9c": {"doc_hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b94db02f-479f-4c37-87d4-87c7c50170f0": {"doc_hash": "e6a897b9060ed70a0989ee73a1f1c8d04294fd8888e896c326e57cf70d3b9ac8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c1b894a9-7003-4f7a-a787-2b661cb3da7b": {"doc_hash": "ab637ef3177a66e61941fbdb7f35e5aebd20a3a572ebbfcb4cd1e7344b21545e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "16ea94b8-d9f8-477f-a23f-d638b9d09b34": {"doc_hash": "c7d81cc9a10cad7dab9fc15244290ba6d2f11e82e47a0347029e72e477d618e1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "83582d80-e9e8-4e4c-b926-96a556371f02": {"doc_hash": "db9666257e0e69f4189f10093dc156079068f168d196a4ec67ac77a4020b27ba", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"4c0e1591-8a5b-459e-8bfe-b66a1fcf767c": {"doc_hash": "92b7ebd934315879d73bbe48fd4f523dd484622813141588eb66b01a00e25c2d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e5a8d498-6b22-4b49-985d-82c37585e9a3": {"doc_hash": "ed646248a86639a6156093ecaf1edf7531d3cfc470b243906710fb5a227e492c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d682a81c-1f9a-4ca5-9ae5-9fcef30d40a7": {"doc_hash": "f1cf65fb0a0067134e6ee20d12d0f1cdf4b996fb1fc8b9c756fcaeba456d2313", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "886e17c4-6041-4933-98c8-6fc49b3c9174": {"doc_hash": "0e6fe682d4568132fafa8aa486c80729bcd713a731cb787c024b2e8c3cdcb276", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d2ea5f50-ce5d-4a38-9780-da61dc90ee12": {"doc_hash": "350a824a8d5630e015d27b3698bf19a159796315af02c85b3b511af7ef479e05", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "60f46972-4e33-46e3-8ecb-4d20e13c1a05": {"doc_hash": "73842178ecfa8b1b8937c994ce5d175692524d62dec0d197f45d1554d76231f8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a28da878-830d-4c9f-aa01-e3c48a9339ae": {"doc_hash": "4b6a6c1144eeb461ae6dda03179528b5c008c1ab5ede51b7fec1d76c48ec89a1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1575b6dd-14bf-4610-b839-01130495ea45": {"doc_hash": "5b458c253a01c763f26cfbb3ccf80aadd49520d79f1c18d2fbf8e96bf90f8dc8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "74ecfdce-10e1-4ffd-abfb-121f3f1b0c74": {"doc_hash": "d29c86a9ba7ba1ae90895da31e37da9e860f7a65657cf4a5539522d4421cd642", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "325d4916-0f12-45ff-b0f1-3a3941054eb7": {"doc_hash": "059b3a30c7c32c2f6da6dc1b3bb829ba25f672f0d84b60fef3826169ca38d16d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "439f34f6-6d92-46e3-bdf6-715286b6bde2": {"doc_hash": "3d3bc0d474407fe12e2a454f5957c690e5187d56a6793fb7201312a4f57443d9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fe741f39-589a-41f3-bedf-f7c0f392e16a": {"doc_hash": "58602806d109ade1d1bdc4497ab4fdf384ce94f84f5e1aa9ff38d77b02a7e50f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "93f78c61-9740-413d-b768-9912cd3161ef": {"doc_hash": "addb794dcd219eda208d43a83acf08006fa6f6a1f0bc3ad8ab2b1695ceba435c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "88eba853-64cb-46ff-9745-fc38c9666b66": {"doc_hash": "7c179654a658fb2569179e10da2fec9b83c0b1500ae73ca4e4a750b33c25c527", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "805833df-bcea-4177-93d4-81a549b5b783": {"doc_hash": "43c844742f93102b03a24feb395219e432f5905601ff84de4d5cb3bd3eef4ae7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "452bc98c-b51c-48ec-b3ee-b380a6694e8c": {"doc_hash": "64c85fcb03d0212c5384268d370e9f49d2e43238b76101155a28a028a1355e1e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d3df5068-0141-4827-b583-b773ae1f5d9d": {"doc_hash": "d66e0e4217f65dafd2d002b74ba0148b87fd9cc988494cdc70ecbb932f94c49c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f2720dc3-4669-4a19-8931-0cb9f4aedfc8": {"doc_hash": "2a2344023effda908fc40ddecb793ab8a2b5939303bac71b9178cba02b57bce2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "edaefa7f-3e0f-4197-bd2f-2a21bc6c5825": {"doc_hash": "dd9082f15f8096381b1e8472f7030328e8be414d19c67e03851d5a071507c008", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6b94705c-6231-414b-8837-306425a652e2": {"doc_hash": "c6f7cbc28215bb6800fc645100395bdd5c16847f0fd6a7022404e6ee238266d9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"db3e8b23-f911-483b-907c-e2034c2c6619": {"doc_hash": "b9aa978c584594660fd10e2d298734d9130583a73fbec914b840fa11f44836b1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "444806d2-21e8-4dac-81da-2d64fb82874d": {"doc_hash": "8d31b0ef65322abc1c72fc7f4926115a1b7e71b2b1e2081e5ed507b8cff5344d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4738d7db-d5a4-418b-87d8-d339f43aaa9d": {"doc_hash": "f43bfafe3cf994c1379433990d95bf60ca14c9f72908a623aa20180da9081463", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9492e792-71ba-48a9-ad92-d107fec88cd4": {"doc_hash": "77706e6fd3b7e9d664dbc8b777a2b604214424a5edb829555b48bd339c2601de", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2cae41a0-1340-4553-8c9c-2bea5804282e": {"doc_hash": "b2a5b75b1fe05b11b1e8c241312136e431d5a5a8b94d3a304b7182aef81c2a7a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "821d7380-66d7-4bda-bee8-5a015eb5274b": {"doc_hash": "a50101756704dae34bcfdc115e880948f75292ed94609bb92cfda093d61552e1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "64948313-8678-4ec7-9b72-e4efde89c47e": {"doc_hash": "1ac6187563cf16ce6b9c4bb4aa754ea55cc569ff5f2b5d643b0ee6866fdac11e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3014e910-a107-44bd-96f1-01ca21a4162e": {"doc_hash": "282eae1cd6b028ec44fd3953e83c127179e677cea90d9eea1d507f1c60658090", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d6446025-b127-41e4-8ca6-bdb54cb306e7": {"doc_hash": "0c469989f08997b6252ecf32781ae7b9395e959f8624fb2c3acac60b0627fe6c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b46503b1-bae2-491a-9ff0-68e8a87e97d1": {"doc_hash": "0f8b576908f2ff7f09e7d9d72194e01eac2da2a861e533f5b1cf4b3215e68e24", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7e9d71a0-bdfd-4bc2-a449-a4e229fe2d2e": {"doc_hash": "38aacfc1aa80eb54716d969288efb91cdf788bfcde69431c76172ac1a0d5a564", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "38af156f-141a-42f4-b5a7-a06e5cc94324": {"doc_hash": "eb132a180a2797cf926198b67a1d88f45c209d293ade55c24df87fc58da896c8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "feef5e1f-17a7-4f08-bc6e-d44dacd74466": {"doc_hash": "5a4e3b453cf3d06d4315d60d2443c437bdc808f508189b5ded547ea048613b19", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6e440c97-48ed-4abe-a86e-cc2f85db3302": {"doc_hash": "854ae2856e43d9eb2a972dfbc984c27ce4eaf10c6bec946e6ab489f32154ff37", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a847bbfb-3bdd-4a42-9f70-93f7c3e01045": {"doc_hash": "b24f1069a1e3ac69ebc62d4523a01696120a47a00a64fa5f96e0c488520b6f89", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c5599d78-d420-4c6c-b2cb-34664566ea98": {"doc_hash": "499443794ccedb84fa8660e19122219b33843657291d3f11b3422db4fb1439ee", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "04e5553b-1e36-4398-a203-82cab3a97862": {"doc_hash": "b8b804108ef142ec32598ee6bd986a67e1735c70af42dd67ed98fdf84a9e5ceb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ded6cc16-9f03-48dc-b90b-a5b5f3b03ebb": {"doc_hash": "baa89403c84e989c8a3b155af98f5c2ac68fcab6997f1795efb80ea0d301a674", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "499f9f47-a0fd-4700-8766-2142378586f3": {"doc_hash": "e8ff644bb6067e791fca938b2a80c79441cf0df37c109ce03c9a66610c788f69", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "17b6d99e-9d9b-4f01-9dc2-1678807a7542": {"doc_hash": "fca603c71521c44d13e08cc2697762192fce57cc7a9002b4bcf468e3e72d1791", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"84361864-b821-4556-a1a5-a31862178d2e": {"doc_hash": "55e1411a5a3c0ac2fe8da38b145846810be349d98acab78cdee18b8ffb3d2982", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f21722f3-752f-4e76-939b-1429ee6d8f0b": {"doc_hash": "51261518d02259e68c5a15e7cf41c21f8ed92f08f0a0eca2abcdc5bc38c35548", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5c77ea63-5345-445f-98ad-bc69576269e2": {"doc_hash": "30b3debab4e4af89382e741fedaf01e6e6976da684751bff2aae3ed9a4adb4fb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "969a5fde-1aa8-49c0-9d42-1278d227edf3": {"doc_hash": "3b2f07149b665f1b38236655db170f57a36e5e75246d92538c67b39c09301a52", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "30dd1149-5ef6-4b19-9a04-1765c33e6357": {"doc_hash": "0302590233149aa647fff0b394c416ea288535bdcb143e9a328dcd5f4c41c21f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4a5afb10-541b-4ba5-bc50-08d570df9b4a": {"doc_hash": "384e5bcf5af78f7b161498883409bf23cb4276d625c5cea3bace0222e2ffbdba", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f2700c23-4a8b-4b21-a0e3-fb643fa832bd": {"doc_hash": "e1e655c80aadb885b1ce77e17fa6284f024c3273a66a7173946bf505668cd126", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0406f467-70ca-4e52-a125-5b56713e3d32": {"doc_hash": "ead2bcdaef40887576da026e87f3fb7fd6e6d303a22da92db20bdc5d79a3d47e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b6bfa95e-9911-483c-8cab-301b588a3d30": {"doc_hash": "f2713c5ea5269288b1e6d1a35f8c97b60f2b1ca3a6122921de67c98fa9df8a79", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f46ef4dc-a8ad-4996-b30a-a4d1437349a1": {"doc_hash": "799093ea4e045154e8d9946844203d1dd9671c5457c356850ecf70df100b392a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5c78eade-4392-4784-adec-d0ca1263fab8": {"doc_hash": "1bc3e918a3c6c5d95df4beb146b44e6a03a3c75f8a77a7303967b0cf950f1807", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5572979b-2863-44cd-870d-45d52c262bf5": {"doc_hash": "eb454b3b004644df699c7b8514b617821b0772f65668c7345f7dbd3b633287c1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6b24f08f-6ccd-44e4-92cb-f1b3d01e92bc": {"doc_hash": "e0c208e58235c65a19cd78ee2a656bdd276c87f58d4e6ed52d750e798c7404cc", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ca18ce33-4362-44d5-a406-6c27a0f7861a": {"doc_hash": "a053bc5c8a23b1157927f1e16c8a094baa57295676434e2dc8c7563322fd4afa", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5e75e3d8-1d45-4315-8837-3293744b4f73": {"doc_hash": "b2d222530b02405ab40841bd4c030b2cbd8aa4668518b1dbe5a381eecfb75c96", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "30455866-ee22-48b1-a060-d2ba0f715c2d": {"doc_hash": "56335e12178fb38853df2d0cff52b662fe2eeb41263dce2e9418db30140fa40b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5314ffab-e934-4121-95ff-339857a4d727": {"doc_hash": "c70db078e1bb96690e52629cc076a6d2ad07ff13a3cad6f27b84ece044a917a9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cd61513d-e844-47cb-8fca-0d6b4863cc35": {"doc_hash": "123cca325630f890a327b6e37d7b32f81ae494dc64a71b087293427bf45913ea", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a3e31b32-a8d2-46f0-85c1-030e263106b4": {"doc_hash": "dc578b4612921654eca24103f7c285e7b0680c94b1b8878329c0e25c5b64b5db", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "98209a41-983e-4d93-bdd7-7b6fe9ace351": {"doc_hash": "6c4b9e9e6e3c111b900c2cd6f2cd70faf798072c5eeee1c78e8603fd8a090add", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"effe17c5-6c45-4886-9061-8deb8777eae7": {"doc_hash": "d912e281b27207efe24e9223cd3b31763e54126623726b447d504a8e3116858c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "540a4284-abfb-469d-adf7-efdf2de6e063": {"doc_hash": "267652289d696e8fda4b753f64b7c09e7cc59d1178b10b9cb40df8d92e773388", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "be0ca02e-4045-4bc9-8a40-86124675bb1b": {"doc_hash": "c4e36a6e5bd6301ea21091682fbe22de3e6dd46de39e271fe5ac6c57f32ccbb4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9ae8827d-c5d5-403f-ac27-989575a368d0": {"doc_hash": "dcfc4303f3eb3af48b3c8c9307bdd98957fe37416a1be2567edff8e3a3544c9a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "17b70296-1704-4418-be14-d803ce2ecc58": {"doc_hash": "f3d195710f771ec4f4821c7fbea012e7deeb2a33590f4243adc6f11acb1e0b79", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1ba67c51-e33b-42d7-99eb-d7de474219b2": {"doc_hash": "8383d16e4beb7dce10e6b41309756ba71f90c1be180b0e3d94ddfedeb7a773e6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7ff32c28-a48e-4e1b-9b6d-608f353fef98": {"doc_hash": "8667aa410145d1d83f226976778cb2203a4dda4b2f89818fb159113ec5a9033f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8581e29b-16dc-4f42-bc24-47cb29731d88": {"doc_hash": "5599af10bbd80c2d888d3b2c8f0f0ac3bfd02097dae7249ff18c6cb62ee27c33", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6a3d36bb-ce49-48e0-bc98-77b76c8ac681": {"doc_hash": "e9dc08721ef6bc294fe360693be3caa99ae7044afaaa648303bf21af9fda704e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "25055c3c-e8cf-42ae-a948-5953dc4b97ba": {"doc_hash": "1ad9a0d128dc02eacd4377528724bf059bc9cb0e6f2fbafa288253ad94c4fcbd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2d80ce78-54b9-4216-9c4c-29a5c55a42ec": {"doc_hash": "125229bbb2f6d2643c532f117f8b65abf8a5febdadd3f479f6c68e4562fffe0c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "55a0e6a4-1259-4dd6-aa9e-714e6dcff2c7": {"doc_hash": "4552a35bb5e38c366067cc1099b3572085b0f4cbd48d3ed2edcd8c392aa727b5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "09f02465-7111-4748-b6e8-73752d699022": {"doc_hash": "59d1dffe1d5363335bc8b5c711bfeafd55c3ad6d3098b31def1f1e86eb575560", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "43570a47-e3b3-4d49-b7bc-8c9eb0132a0e": {"doc_hash": "189fd8b28f91a84ee94eba1fafd26a0dbd42a4c1190ac1ce94b44af392b7a1d7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d28de995-adf0-4861-9b60-eff1bd7dc124": {"doc_hash": "fe052919b2ad5bca007902d62e0b514aab7662d545c92c7143831f57a81d13b4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0404cb2f-970f-4d1c-85f7-74f1b5a9bb92": {"doc_hash": "f2dfdbbe5a67d24ce398b6be9d209172f77924a19974ca6f82147507d2ac9f39", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ad4f2620-64e3-4f0d-bdb9-f9754e4a87d4": {"doc_hash": "866b8dba809fcaaca315a6d3af56925eb42e89d537d1a0a03eaffe178349faec", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a31e210e-d07f-4e17-96db-44873154d745": {"doc_hash": "b5d28a3be0f0e7eea41c1d4229ad7b639f0419fed501946265096df531cc6427", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "09239c16-5b95-4566-8027-af1115b90ffb": {"doc_hash": "2a14992363d218016cd4d91f93a64d877e428bb05d8f063f64ffcb82db5f2059", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "57dd61ec-fd44-49ad-8777-e972d4ff2ee0": {"doc_hash": "9f738f6dc26292c9552f70c5fa300e63d2d2d3d59963320a3f300f6413fb0de8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"9d6d45c1-1b08-44cc-813d-3104c9d48101": {"doc_hash": "9ae009eaacf19ed2f6727901bfbf719a1bdc930f3c9aba00461f0b7827d855fc", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6c15d03f-d486-49eb-8f4a-eeb97cadb4c4": {"doc_hash": "42a291a618ae4732fccf09ad2183509e2abbfb9fa1b34108016552c54a9c642e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "080c4498-a345-4c55-acf3-3ffc2b1d5347": {"doc_hash": "b42699445b7e4b6064f8cb58e6c3616f0cfc98bc630ad245bfedeae22b4b0bf0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "76f1cdb4-f8dd-4964-957d-9f9b7f2e3ffa": {"doc_hash": "87221c89b849197e3fcdfa9dbf2cebed9937088be71c1ef195c848d714531eaf", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b30ae036-584a-41c0-ab11-09b566a27014": {"doc_hash": "f2a7b8203c10ddb77a15cf0df84c342a956416e7f3ecda3d701f39cff85702f2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8347e777-f555-484f-ba9b-687f777dce65": {"doc_hash": "db50af63b686b98cbb6f6fb560138af5a7f6466c250de1cca22d2d0c59f97fd8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b07a199d-3d88-4932-8360-c436e7624a7b": {"doc_hash": "f5eda4a66eb60b8529ef70087436c6abae5dfe813109944cdb3cd71cacfa5caf", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "18f2dcce-f902-43c9-8004-213e3ce18d2f": {"doc_hash": "e62834830a4ceb17fe28d46b3d211ffeb7128acf75928202e6f2fc1aa928bd5b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "20816ca8-bcbb-4b1a-be21-b5cf03d5375a": {"doc_hash": "75e8742e119888e5d57a7ce02003754e79e529dfe761e4462e7ba3ac4eb941a0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "97ab9268-d664-4cdb-bc6e-01a11f926629": {"doc_hash": "64f98d344159ef64225a7c4a984340623c1ea5705085c518cec62149be24ed0b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7c0be403-436c-4820-9984-136e9b5d21c7": {"doc_hash": "abbfd43bc8b20971423fa2875b9dc2519c5a8dea10cb0a166a72ba06b751c0d3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ce22a1e4-9c08-4287-94d3-2569b25c1850": {"doc_hash": "109a57904978953011bdb5f6545caf69f238f375615d71176f540e50b76f3f2e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "881f882a-c04e-4abd-b470-9608b6ce84f5": {"doc_hash": "1c4f6e40b84fc0d66f4d5add978bdaea8f438a503f0c4286b9adf67e74604c8f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "815b7478-91e6-48ee-ad73-5bcac53aa1da": {"doc_hash": "47e6a549559d36df6ed7b3363e2f0873c666d4b57423b78c82224863ea0dc07c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "da32abdb-18f3-48e9-9a4f-0532fd6cc95c": {"doc_hash": "6080aa849bda4ce1864ef06d74adb3673c33eacc05319f5a63b4449a18654e8e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ef0d3042-1780-48a4-a7e2-68ac91d40ef9": {"doc_hash": "96765c81bc9ef27d7f16259e5b13674e03304ebe8189b1b915878ae8c97cf5b2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9bf70dab-412b-4c9c-b211-86e70caab207": {"doc_hash": "51ee27cfbd8f8e541139aaee88d328d85e7cd4278d5aebca844a211bcc27c586", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "653c70f6-f074-4140-b88e-3cf8998f75a2": {"doc_hash": "6a9e127fe40364b7bf3dd6c76b49bcc7792b9ccddb78ab95f94369bc021183ef", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f226afe7-e029-4a9f-b1c9-454abe410d7b": {"doc_hash": "c8a257df62de322a00d808bf858f356ae20350e4e20f39a4a33a5b4835d59e5a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6d2c585b-1fbd-4787-942e-8eb3c9d8b979": {"doc_hash": "c97934cb16a56f6a462ba9ccf85f15e9e5548e5a5edf571547bb1c8d2770e946", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"585507d7-11dd-4785-9536-1d503cc157d9": {"doc_hash": "61e539988d855eba7c657290843d3f8a08a7bcdc1f31a38876b1c53c9930340a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fbc8a8ff-692e-48fc-afad-81ec2e7777e2": {"doc_hash": "9171f07400338d554da9848e5a024816277a183b02446f50652e816338c931c0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "337fcab1-fcb4-4a29-9db8-82b36a72ecfb": {"doc_hash": "e1eadcb90881c7d091a53c51a080d5b73f7cdf1e3af3e23e3a8091ee0aca14c5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "262c1bee-ab30-4056-8a25-480f8304420e": {"doc_hash": "000845cd0166b5c21665668ae0b8fc1345c551b2b21f545a189ff038f23934be", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2e55fa52-a34c-4427-b497-a98c074589af": {"doc_hash": "78677f120bb5a274f0fbe40367b64d3dec9e362809d8289a16742132daeccc3f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fe71d38c-30df-4a7c-a675-873b589f5b1a": {"doc_hash": "9bce028917e6c1fe2096e5b83db8f7c0eb976457adf56d12b9b0d745ad6774db", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ff83caf1-b91c-4092-a387-039161a5ddbb": {"doc_hash": "6839c1cad36e4671a344ccbe6ae8ac64b8743a61f46ab0aa757cfeb61bb2363b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "69a7e9fd-2701-4ce0-a07c-5449976d4818": {"doc_hash": "541b1eeaa0195d4022ca618524d3ba60e36542012eb9262a8553396fe9b2dd0a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "43b9e5bb-0376-42c9-a8a6-07c2e8a15b9c": {"doc_hash": "574fcec98a86e8813f013fb360f8738b293ea3faf185e1d4fee84192c7889f42", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b34528f6-bfda-4d05-905e-4637888d1f3f": {"doc_hash": "18fe14a1e68f16335046152f91a080ca592b81cedfec0219994b78c93fec4884", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "666ae9cf-7c37-45c4-aa63-8ed6fcedde2a": {"doc_hash": "7b2fde52824114fa6aa884c583e9636a0c7ee8dcdd4002533aba168697ae7c65", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e5eb25d7-3e3f-4e7d-966c-2aff19f0d129": {"doc_hash": "2035091b1470acb295f5f3590af06f271a21923eaaccb03fe6d5ad0d8e5b1121", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bc26c442-0638-4ecb-8ede-0d9fad749592": {"doc_hash": "eae8e29b6beea6bea846ea7bcf7b46e64bf5204a23277bfa2185a2f7cc81e9b6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dfdb0fe1-039f-46db-86b9-0a10d5744edd": {"doc_hash": "a21b0bcae443a61a4cdabb18600c6c1a57bc310840e060b736481f46af850323", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a50c3e99-7093-45d1-86cd-a513f3206a01": {"doc_hash": "55b11b12c7e5598a2c369b4ecffb57ee23ab6e1a50fc6f032c4d0b4ddf0926fe", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f7dd4111-a43b-4e70-93a1-45a75074060b": {"doc_hash": "7c2d38ed2bb6e3f3a733bfe4a07b598a6daa8a1e8cc10bfd480f8ba53a5c6ec8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b943a226-2bd6-4d14-a178-5e1cc7732450": {"doc_hash": "4332dca19f1fac65e488083e2e9e24245138704a3b0eacaedcf9fcaacafe83a6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f28eb04f-c87b-4361-a74a-8b1d453840d2": {"doc_hash": "685b77f8cbb4ec20a117b9f91d274e5921207390d69a7437f72ac363d200cea0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "38bc428d-6a40-480a-af6e-c4a4491d85c9": {"doc_hash": "c879caf6e44e4243215ea91ff86de07d91af4e243bd28712ec26d26759a4f405", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ced0748f-8840-4758-808f-9c7c25da7617": {"doc_hash": "7291bcba8de4c28da7d30d4265398b8c9874f9e50c8512ce8a713bb052686afe", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"6c619a43-f240-49a7-91fc-c5bc46fb2fa9": {"doc_hash": "ed7da96a669d1ba227c8b94ff72f4af123a80cb969b75ac9b34ad23ff7e6884b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c8798be2-44e1-4bc0-bc4c-027e842b425a": {"doc_hash": "43f41645e26166862c971ce074f0863299f3be98212f092853c1123f0180e9bd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b4f1628e-9280-4b3f-8ba5-96d2081b87dc": {"doc_hash": "37ea05d9b460153b7a6175aeee19f1b114f98eabafd4245a38444d387bb4db5b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "644a0b92-3b13-4fdb-8fde-e0b6861b6c3f": {"doc_hash": "2830b8edc1989a037ee6bdca8e425591167d1d996d633134f3f8437277000fc5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ec66bbb0-1388-48e6-b91e-430c75f57caa": {"doc_hash": "4779deb090af1ea16312df4d40055c745815f3e91c9aaa7071c84438ab628549", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "836469b3-50f3-431d-8d1c-a0f6e11e23f5": {"doc_hash": "9bf428dca99bf4c63ebc87aa3cb6c53adaf62bd9cb94f9128e5fd6f58a305529", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1e78c7d0-86cf-40d5-90a8-3d34d076c8f6": {"doc_hash": "90be75a0682edf69be50498eebb4bec5798df0eca94e4e9c82432b802043b4b1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1c22b4b7-6bb3-4d7c-a1d9-2a38853a04df": {"doc_hash": "e7e0f35a8ba51ddd0c4e0d50329766605f3d31ca01a6faf87403d69b63177fd5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "06b7671a-1846-4f01-87fc-b0a41e658fb4": {"doc_hash": "f16d50d5c3162d0413729c7abd66d8ff513ccb44ed3e6063c02dd69d085dcc1b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "99a2f09a-5222-4606-88b0-cafd574b8889": {"doc_hash": "5a98bed1877a4a056da7186c3d44d0a8462cb17cd90bda7458a9e1722ff320f2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ba72a6f6-4f8c-4898-a36a-efb97d735117": {"doc_hash": "bbf88c6125b2ce6721f784588506f44ef9fc00017778d9a2fa2dc2b742df1717", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8b188f3a-afb7-4cf3-9edc-19eed509bc96": {"doc_hash": "b9b4a88f27f635732933182620a643191dbbc786153fd5974432bad1e92b06cb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bca09590-b1ba-4ec9-a2fa-a5c243d12804": {"doc_hash": "03c9158c67b5dd1bb32839463028f01bf62f11f28af7d11b73d880649681147d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ee44a729-ae02-4e9e-8cdd-aa947c80dc58": {"doc_hash": "933e18055b82d29845419e8a2e10bda484d8d034ae4c4575c7165f612fe55398", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8cd4a27b-8e2d-4e78-8f6f-4ba7ca98ebeb": {"doc_hash": "93bf6b53bb414243bb87281567a1360907e39b6f8ca2cf50161e90317a2d369d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0b67da66-6933-4e1d-9364-b7f11e18922a": {"doc_hash": "11ca171ec828fa748a9f5732f382740a3667cf5a94bd84907b7888ff7957b3ee", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "27578e8e-c44c-4da5-892d-93679b7939c7": {"doc_hash": "e803fd4358f8949860de94415a8b5be323e630d9a75968092971d3a3ecb9d33f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dfef5d01-f7f9-4978-8a6f-2b91e90e3fd7": {"doc_hash": "5897be770633d00c135e5f9a33826bd64cea03cae96c41d0c4a9f3d9d760cd98", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "36134c7b-c6ec-425f-862c-8d08d2867463": {"doc_hash": "1122e6ffd891df77c32f4ea1668799c75af19014842634b047fa04934fc1c1e7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1d2d7877-32f2-40f6-b7aa-3ebab6f2c72f": {"doc_hash": "7ff7a89de12634db4d3e211e8be8ea4118ac3086e62636b18f025a5643664368", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"856fbdb6-50d9-43d1-907e-728e8e1c8bad": {"doc_hash": "2f9894c6e3f8151eebb937a526885cad2fd0c4a9de5b2101259aee1e4052e911", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c6f1ed54-5a96-4bc7-bdd1-c9f47ab02bc7": {"doc_hash": "4bdcb172b557b7e1c0db50b8f22303fd78887ebd0e5b07ab544c5f2f04b5d015", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e8c866d8-522c-4e53-9a29-5081916909e3": {"doc_hash": "ebab23e4da5aa8d525ee53d9727282d36ed272968cee86947b4fc055e41323e3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5fcefd32-5a5b-4eca-bd48-b9d30470907a": {"doc_hash": "2f15b63fa5fc5b0b0a03ceffc3db7b2c1225f2ca046aae4be47302be04038947", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b0e678a5-7403-4096-9648-173728b26a64": {"doc_hash": "5148ffd3113bdfde1e5fa76a7ab27efc41b888ecbab95de39c86d05a9437a52f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4adc7211-611e-453d-bf69-52cb7daa068c": {"doc_hash": "071394498da139f517cf323ea26213fb8b333a780fa2d48dd7f938b6b5a2296b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "83435386-7aed-4c05-96e9-2661e2393c7f": {"doc_hash": "121f54a75236888aff46a4836e4fd99ac13bc8b000329599a70c1eb763330ec1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "103ceb3c-99ab-4c6e-a42e-352c80efb7a3": {"doc_hash": "2af08a1466576ae2936e03255c1185055f35d9e1512be55d1f3c309aa61ff744", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fc8accd4-c848-4489-b58c-6a07aad18472": {"doc_hash": "a87e7b3c8c4116031082be57dcbf387edc5630223893ed36d6356a33c0ee908a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "aa4e8f0a-adf6-444e-ad33-ff92beebdaa3": {"doc_hash": "0dfba0adb4245778a4b86cc17e0d369881a0629b161a963cdd39208e46e6a89f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a4ee8a04-86a5-4017-9525-56e63de561d5": {"doc_hash": "d891242be403b2f2d7e0a9ae3e4b50b699e42a7aff8604ce7830bed04696ddd9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a69a73e9-37d5-4bdb-90aa-27e37626b018": {"doc_hash": "80112f44c6fbbe12b4e887b6e9b0322a28bc2dfc93e328b0a7b29e2506f3bfe4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8aae115f-7f07-402f-a31a-6c7d5a2a82e5": {"doc_hash": "cc3a59bc3c7f26dea3793fa929765ffbbc79744f305ed423a1de614136c6bc5d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6f2836ce-f722-49a5-880a-2bd978f5824f": {"doc_hash": "d80f541dc6287ddf6df516f122c41e6204fe4d6d08524575863927e2eeaad002", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7a39664b-9d72-4b21-b589-3145f8d4321e": {"doc_hash": "7a82c489d51db6c49ea1d98dcdc5af6215391b9741c0f0d9a78fd91015df7c36", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b1ee681e-2b56-47ce-ab40-dfb695afbd7c": {"doc_hash": "ae355aa187806e451486d1a64c929e11929b15bc33c715f161217f116591875d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9905f06a-f993-48a2-ad7f-9756ab1ebcf2": {"doc_hash": "56b1f127b8d02797178b36811dce89a4c53e6ba0f0523f465e734cea7bd620b6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "55488926-b0b1-4a6c-90bf-291b7e7dd687": {"doc_hash": "26cb89309ddca69c657892c2466a1ec9edc2b7820de2d92d82da5dfea43cfcc4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e88e1403-b9fd-4768-897f-fba304b8c821": {"doc_hash": "3cd361950e8610e7c8d5d711a1f3905656d6662c72f864ffe00378c2a03413f4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "df2e7508-c250-4143-a6f0-b956977316b5": {"doc_hash": "743d83679773d334885fe6db87cb16ec118caf919ef815153d45bc167b1d655c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"f26eca50-4efd-42cd-be49-1b4055ad1b92": {"doc_hash": "36ca1c9c66ada5596823e899ca3241fa81f600fdabfc637b69669fc83259a4a2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "eab23254-e90b-4b4c-b8e1-edcca3a1ca8a": {"doc_hash": "310190ddd3e7e23819ba4e3fff2080197abf4b1c1a58f6c217617ac2eed427a0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "071c7c1a-0cea-411c-8400-38dac6ef5213": {"doc_hash": "1cd1942f2648c7affcf6f1dac8b023ad541f6fb508cc82183de72a8323940875", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "15187e20-014d-4d6e-b797-a6d6824ec412": {"doc_hash": "5734622ba9a6f5906d64166d2119fdde797484ed66040ea75dfe55faa6745470", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d0f9b502-fc8d-4f2b-a99b-eb674e5affcf": {"doc_hash": "efba16c950483fb52124336922848c91657d709614cf56fdf6c794348d248997", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9138c195-ffd6-471c-9b0f-35c6f2bda660": {"doc_hash": "41fdc068ddf776f9cf645295569cf1a26e8e2061218e5e12f7a1ed10aac50b96", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4a65a2da-9355-4c04-8c5f-73de32763679": {"doc_hash": "17f46f4fe0361cc762a60efa69be1a0ebf26fa76a437f4fae39ea3c7801d9a74", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9dc5477f-4a3b-49d0-a325-d1d8faa6a08b": {"doc_hash": "9ca545e7fa29a2f3c6a803bb68c2a98fdf9c84752b07e7039a744c6f349dde92", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fbc99444-b30e-47a3-ac23-14e533c66fa4": {"doc_hash": "e7cf1ee1854c4286283187b5b55eec555805a85b119d6030d6e6183d13e8103c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bf0828b5-f174-4c8d-942d-f2db7aa26b4a": {"doc_hash": "c88c6c6101952a5a3c9fbfa084ce1335196a7cc131f890d974be3cbacf433edd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f238aee1-b43d-458f-ae16-81150b5dd412": {"doc_hash": "bc94241f1bd7ff3ab034af66cd08498aa61169099dbf9cb997fab53738761d4d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b8236aa4-0fa4-4a03-8812-213d0e9dab58": {"doc_hash": "cbd0d38a59af4d4f611d1b39cb1f7c6f7672cdeb06d68d65208f7371ae614f9e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "af402f64-e59f-4d61-b4d2-9f44e7591806": {"doc_hash": "d21e1c74f245937d8cc2ebd28855ab6a28f7c3b65e397bf79f9c3aec2ec1c05f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9a29e474-b92b-4d2d-9eab-bf29e5dac261": {"doc_hash": "bab726d818ce76f58964328894f244d0519365cd0991b28bb600e992e7d2c567", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "41a7601e-f84b-4cf6-a699-ba87c0f930f2": {"doc_hash": "5d3f967ca158f96b6afae32fbd388bdd8035e32f8fbc67cb8e407b8dcc3a19cc", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "90caea20-c716-4371-89e1-f4ac63612885": {"doc_hash": "4b11838887fab976c5b289d37697637d34a32bb9d37726e52e6d342bb1616ab5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ab6286ca-6f9f-4c76-82f5-7129afd57a7c": {"doc_hash": "c8cc5b995208f6ee629f43a294db1a09fdeffaed02472fd73b1d7d7808e58d25", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1d7adba4-1b08-45eb-993e-19df387b769d": {"doc_hash": "96d7fd976f34c0a6949bfd2ea16bfa9c3b03c40c6da9f4934abb29245dea4aa4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d5c8ca45-9cae-4545-b92a-5a54ae6256d5": {"doc_hash": "cf4e84891037ddb961ff6720802c1fa507ba94750157c7c6fbabcb6d3cf942e5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "01188e94-90f7-46cb-bd85-aef6c0902393": {"doc_hash": "5a2840a3332b9bfb702267b4ee4fc2c9dbc0bc607b9d7075f29fb29e348a4f68", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"feb82ae1-21c7-461a-9656-5530dab86794": {"doc_hash": "d24e3b6aa2b35a4fbbaa124e1db781bd879ca94b04a888b1191d92905e20f5b7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "94ba5eab-52d1-4f7a-835d-0706fac6d63f": {"doc_hash": "39a784054992700c995ec3d3b14e864a4b3e80ea4005947b5ccdb9d5d9a0ad42", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6cde88d1-71f6-4571-9f86-d3336b290a1e": {"doc_hash": "504c1368b38681667dd53f291ab25b05bf799f7c7659fcc9a9e8243934e4a8e9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3d49cc93-23e5-4276-b0e5-60a91fabaffa": {"doc_hash": "98d332448219ef2586e78bb57099f1ceee7f8a0eac3da0bce42be25e6c65b951", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ac6f6fbc-9e6b-4b31-bd0c-e289765b4457": {"doc_hash": "fae908cd1c36c8ce0a2c3b3d9da83837915a1ea1c1e994828d588a9d24f3b91b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e97c892e-f6f6-4047-a7ae-f89a861ffc31": {"doc_hash": "efbb0e0f54b01004ece55e98ec8e5949b7eef7b45bec1fd5931511de70a82557", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "db6c83a3-317a-4e4e-a31a-35c10b8aa6f8": {"doc_hash": "68d0e4143ca5f6d8ae439721f482cb3da1acbd04b2229f9c0c7760b0dddfc761", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e1676b5a-d733-447d-b786-492ee36fb73d": {"doc_hash": "a5912e0c7750421f090aaa55f32d8f1ed5a305acf0dd37de86b3e50e5cf296a4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "eb932f31-ca9b-4aa3-9929-5fd821dd6bb8": {"doc_hash": "ede3e9b453ec43d67b4e218e11a3b6c9c18904aba8741212f27bbac5c6a0a3d6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7fba77f6-783d-4784-b197-98359743af41": {"doc_hash": "f9417e4b8e545ef2090e8d18173fae6763a909ea76cf1077c9d051539b735355", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "24b373cc-49ce-47d6-a7e1-148bb7a8cf2d": {"doc_hash": "680fdf474d78a67a5fbda575a3ea8d885d501ac5e2f5cc101e1a2266683d7ab6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "81b8fcaa-24b0-4e36-a70f-36321c55cc4b": {"doc_hash": "33a61cdde13d16f7afaac5161be08618b57e83cd58eed0a63522067078e6fb54", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "20cb007b-35b8-4e3d-9872-83a789531773": {"doc_hash": "7a1c5ef4a255b2f279c94f5cd7dce1af41b5592802cd7bc1fc0713505c84c439", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4ec8e19e-fdff-485e-8be3-d33993fce252": {"doc_hash": "34cc6aecb783c443c6d07af1f767027e9e27dd9bcc528ca44fac293e97ce4b76", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "47fd4105-9146-4e1f-b55b-8ee60eb703eb": {"doc_hash": "bb4deb038b29fa83228363daebc2a3a69db9074b86d61159d184c059d117eabf", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5f2b5735-1ff7-4060-b09d-a23956e48381": {"doc_hash": "c112e70a495a6a94efe7739808758bbb563e5214ed34c4710db1b4cfc2387ccc", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c07ae06d-5414-46c2-a759-d048cd1f91c9": {"doc_hash": "f20d080df29787b985f4e98c2a2fe6ccce56bd6f22729956ad251296ebe8b047", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "079d05e4-9356-420d-8533-711778f054a5": {"doc_hash": "958c9e7427a8836f17e1d133eb12e5b871ca8e6a097828785a6265c5e969f2a1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d007f427-36c2-40a8-9b31-ed2ef439ac49": {"doc_hash": "ef9eff1b3928bf20bba56ffcec6f8adc9b70b1157f9d16a7cfe4c0b9f4fd7e83", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "52c10101-23a3-4cb0-86d6-6ddeed17061d": {"doc_hash": "ece0a6994f09eab06af2c821c795de3b0cc0a804e2870b37d56292d5d4efbd52", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"30c91376-3c31-4fb6-9da7-bc245e3c2586": {"doc_hash": "60de6f5866c443d58d1e4ae2fc92a3844f0b5cc4a673d5de7602af16887c1fcf", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fafda3f3-18f8-4366-b744-f92a4f01b495": {"doc_hash": "14f2996ab178de094208f47ef622b66cab8cf96d9b9969d6366b424f4394ac7c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a6abbff5-2dca-4d09-a566-103a46b86814": {"doc_hash": "58448b481266590ef2d2dee303b9938ba08677dc205d64cf71eeba32b79769d4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "524b05d7-cffd-4276-951a-cdacdaf9cac2": {"doc_hash": "c47c6ea1014fe2da59c61613443363495b63556099d2788e21327c63c750eaf4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0eb440ad-0ad1-4b53-8d52-cace16ab2cf6": {"doc_hash": "e8f5612ae4e7992383504c5f082ab9405a8c6af6ea8588e0849df532525636a6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2bc37f51-7861-44bf-95a5-cacd69bbf16a": {"doc_hash": "ad924989c4493b85ad1d2f47b1c6d48f674577c6c14a54339b235c2ec6195765", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9ea98d20-19fe-43b2-bcdc-7a64359de16f": {"doc_hash": "5a0141b7408e924160d6d14115757b2885fc8b7c3b71c09af4e68a52ca16130c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "25c6d623-7212-495f-9203-dcc218943be7": {"doc_hash": "649f5691a91281371790d427382c49657515b79ea8bdc707003b3399700ab70e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5dbe6947-ec88-4513-8499-5c02866e0b1a": {"doc_hash": "42afe7c602c3f5c51a9d948cf0b6c89af5590c9ad67501ad4c7db0b68668369f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bcadae36-028e-45c6-a88b-3832c3fc3524": {"doc_hash": "a24ade5de6fd1740927f459978a37f638135f380be4eeaa2599634de61bb2067", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "216f561c-5899-481d-bd8a-524430ade7d1": {"doc_hash": "38d959fb897b9f4bfd2a1ccb30c1d8092787a42a96bbb728fc953eaa16216e83", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6e3eff4f-3d13-4bf4-a6ef-fce5a831e7f3": {"doc_hash": "efde7f6940d6e1a2a1850305634820945a9c0579cc1c9b61c85dfb9023fa4efe", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e45ad1e0-2af3-49d3-a545-2adf212a4013": {"doc_hash": "ea0c3de769842d2549b49039863f52109e1dc1311184a7a8facb60f450ac3e6f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ca134844-683a-495d-a69b-ad9c66a4040c": {"doc_hash": "9be38752d66d807290ad4523cdcb9d3d153b251d6a5b27e26f8d7af66cfdfe83", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dab772df-fad7-4e74-a241-96b18ada7f01": {"doc_hash": "fcfa057187f566bca94b810b17ec7126e2c6524da0157fc696ee449337518d7f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "96bd5939-4069-4e77-ab9c-3be9bb5f5b62": {"doc_hash": "ae7b814796f1d4934974a0aaa4fcc83c8fdfc52e811754f11d61db56f5053e58", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e6d9ab09-6c64-4e95-a6a2-9868f680646d": {"doc_hash": "ffcca3348b8c1e732c8e27f9be9801495c2b15f812f51e05e672210c72f52880", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "379daacf-db91-4803-8797-738959d62c68": {"doc_hash": "6db7aaec15f7d2c987db2ba89d731e8c4a960e39ef8bfcc43a66104ea483f6e8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "020544a1-99cb-4974-aa81-6c77428d9a61": {"doc_hash": "55cd8c6d1e152e4830d122fa7829e32eb7a9045e95e578ed9105d059dffd0af5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2cbe8c40-fe58-4443-87fe-4153b1f157bf": {"doc_hash": "f91f518e4dbd38f938e176015901ce1eb587c92e78702bd8afa2748a2d4809b1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"1714cc7c-31df-49c3-ab78-61ae75f057d6": {"doc_hash": "6b1b90820c81a0026a4ba679f795ad09d32dbc722905e18ef2408eb08715c91c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7d9c121a-4000-4387-960f-62caea151cda": {"doc_hash": "d00dd1338749371f0ee91d01d4fe91a832de33aca644ea84be2e0e22261f9422", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "789229fb-a1cd-484c-a3b2-46341da8ac31": {"doc_hash": "d1109204e3645433ab1434115450359a28ed382d81940a50fdc6263165b1a0b2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3741d8c8-eda7-4c82-b775-dc9b223061d5": {"doc_hash": "c9800b2ff8d3c3734e65d7ea780d9e8276ff616d6e5e14d17d7b4ff421f724f2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ed070ac8-242c-436d-979f-e5dfb6a0a8c9": {"doc_hash": "311938c745a21f147f6567965771997fe91befa612afda3c8936c47e56de58a8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "04b13447-624c-4234-8f00-ddc6e36c6261": {"doc_hash": "eb24fe057ee1d6b8fc6e992577fb70eff6caf4c31c2fa62e3860996ad0d7d97e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8c368101-cf58-43d0-b0f5-405073a07265": {"doc_hash": "8b6ea312a6c51a74270a5728ccc379c91b4b0e12f5f75e51f5046140f62e5180", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5b3eb67a-149f-437c-9947-f25de38686ad": {"doc_hash": "8d8975b96147c1ebf2611f3f3e6d3658d2746da80b69b1f7527ff5e7bd21fdec", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e55d245d-d07f-4a06-923b-df6c86366071": {"doc_hash": "59b68497c7c2201fe2a093a53b7f1fb6bf76fadfaf0a13195bc33a3def55966d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6d50a217-465b-45ec-b02d-f640e3b65384": {"doc_hash": "4aa7242cd1a2b1c83de1eca37f1f4a9ddf85ba11aaab6fd81b12ead28febbd10", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f6312498-1b40-483c-94e8-69ab9cfe89e3": {"doc_hash": "b2d0750ef3a93072209e259eb89025da6181f785277380ec0a79d116626e61a2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e8aae986-aad5-4687-8947-c0651f775ade": {"doc_hash": "f3d4bef01e5f5d0fe678aee8be7514350ccb13283fb56694be0d5cf11dd35eb2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c192aa80-849b-451e-8c65-b717f84ec28c": {"doc_hash": "773d846e3f2a5b1610a32e60873bbfce4a2b809b046ba95606cb40034b5f8f52", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "58be4bf7-c6b8-4d6b-a0f5-3e0551cd1c49": {"doc_hash": "4265fa846338def076c0ba8f29a52ec55076ec42c3d94681dab626e4b33560d1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "eb14c19c-3e70-47b0-a6c1-af75e5370765": {"doc_hash": "12181085e277c9f7b4698f873c81b9137dcf3765bde2d48ea0414c30d27af126", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2b596c03-cdbe-4d95-bb27-acb23a20f45f": {"doc_hash": "2854355e88fc96c5c8b72436ff75dd701f0fd97a5193a0e34679e4fc3989ec3d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "066934b3-376f-4c44-963d-582da2c082ee": {"doc_hash": "60e45ab5d1ffb1bbdb1a3e4342d16edd94bf52a5e32dbaedb8a034e1d2650aa7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fc4eb05a-8cb8-4116-8635-524e9867c4cc": {"doc_hash": "fbcc86a7247413953493145039158a50072717ac2ee05462e107650a6b02f55d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ebfbbaaf-6c29-4d73-9dbd-38dc04cf6cda": {"doc_hash": "f718c0829d5d15ec20d7dc243083e4c40404306e8a4d8f892e7f47925cbdeb44", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "57e1fc5b-0ed8-4eff-ab60-7dd03efb5f08": {"doc_hash": "08a7ed8df05f285019a22ed2b4408fc81fb6bf668ab4929be0c6a9dda1e93147", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"745fea69-17e1-4cf3-94b3-a2ca2cc93d52": {"doc_hash": "2556d7d6e0ae724259ffb7d7e29db378540c29c81155ea76af44154a000ba711", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ed2262d0-d291-4519-81c0-fbbdd34decc8": {"doc_hash": "de8b7f860874cd1736573e9bacc5efdf917b8288472e973a2c710f932eed471e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "16149cf3-726a-481c-b7ae-7b8e3b9a06d3": {"doc_hash": "0e223a115fa2b2d4b5472ecb57ab8bbef8584578a09085b8a5619fc834b685de", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c4bdd6cc-2e56-482c-bd50-78786efc968d": {"doc_hash": "391819ccdc84bfb8c3729479b149e001fc35bd7e676708875c3149482085a881", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ce895fd5-493c-4fc9-8cb3-44afeb12d472": {"doc_hash": "f457c85c520f2626c78126e285157d9629d6c655121da9f464ae87ebfc3ea40e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cc8d696d-c1d6-4de3-b73d-d86d9caedd88": {"doc_hash": "3491bf595a940c0f7272e310694405c4ae72c426aa4771445a16b19a24dc9ab0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bcbd7cde-e8ee-4d53-abb9-c1d605e01cc0": {"doc_hash": "a3233ddc7fd6096926749aaeba36601856be0fa48db6a5ca2ed784f4832da4cf", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "98725ca6-45fd-40b2-a90b-3b0b031018bb": {"doc_hash": "93b8b0b0879f5a599f4852774e9aecf3b72bccc775cd692e69d86b1e08c16508", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "262df308-d299-464d-9709-654347f75a42": {"doc_hash": "c4977a8c0a897634cc27a38e5e08db7f6ca914023e6f969e7d7c0f1b4472058a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "aa8a5198-b3e1-4f5c-8b73-378e209cc5ba": {"doc_hash": "c1bb27db81daf0d216d0989596ba4ccbc76a1083adca04b9a452af3d14babc55", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f60b9a44-dd5e-4f45-841f-a61ed2ea1403": {"doc_hash": "774cd969ff50313b82997d64f9b2e33ecb941069204314328ff8551b83c38ef9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bd3504db-ef98-4219-8390-ad39c8c56d9d": {"doc_hash": "6d065ce56a2e6c1f4687bc94434be19964684c7c0a9d802c674752e3bdbcbe49", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "75dba363-5554-4930-aef5-b2e7e72fc1ec": {"doc_hash": "b5bd4321785741417c3682eb4737f8a90763ce47125b510170f51afaf24c5b92", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4e2999a5-e19b-459a-829d-4feff5e69605": {"doc_hash": "de390a2907d0cf262ce54afd40ad2fe9ccf66ff9cff2a66e84316af553e77a80", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "46da3bf6-3a19-4591-b4e7-e58c2df3d70a": {"doc_hash": "4038a38bbe8f1b39611eeda1a469bf01a6738d380100ad1cbe6310d5a4249721", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "be2059b1-c993-4075-ba87-0a9a4876b6a1": {"doc_hash": "aa7f2c81be1f2ef3d1d499ab765ca6546cc14201d175c725870287954938c8f8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e5aee2ea-a677-438a-92db-4f9006ffa7ad": {"doc_hash": "24660cd99fd9ad517094cd317c9d638c454bebf9fa62c85b4975a132c536ef1b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "643344ab-ed1a-4ddf-a835-06cf20584250": {"doc_hash": "7c985ebc96f4cd706ccf6ca66368a0703f121a5711c8c4a59d3f0dcfb8800525", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a123fbb6-9e49-4f80-8ce3-8531619a2e99": {"doc_hash": "01495c9c8c894ac06e53f818764074e9ae8579869981fe89100da5180b16c9a0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "79117bf7-9333-4a0b-8199-07ee218c7e87": {"doc_hash": "2b0df8df8fd9b1d3ef02091e64a50722b9a9b64b1fafe63a6488a54c3b8df2eb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"b5e7937a-96c8-41db-b598-53e6ff7a6646": {"doc_hash": "7e03f52fc4b349cb56e64735d2dc1794d3366806a151c4d563bda838897ff1ee", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "331f3848-841d-4b98-afc4-6a3da91974a6": {"doc_hash": "6a0b285c6bf3e06fd62ddc5f19cd05594951e4b0907bd02cb3afad9fcdca03f8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a89816fc-81e8-4d35-bb2f-a2c436d05c16": {"doc_hash": "6f0a36bd54312b4239613a5d98369e531caea6ee2b4304444cca9e4c58d0b710", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ae5c0bec-4778-429c-a77f-e3f6bce7963c": {"doc_hash": "011eedb5904c7f2bfcc2ee4efff94451bdfd6afc22d4eb1bc8429f27effb92a7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b8c2029a-08a8-4292-bb9e-381318b954b1": {"doc_hash": "ecf7f8bbc55e974969068712736c53eb456f8e1687c3372c43a7431ddabb9048", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cbe33f32-cb4c-430a-8e25-2833f8866afe": {"doc_hash": "2bb806683ba31fb9968242e8f92fa954508346588a5e0a17eda616854a717d3c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fe114e0c-bcc9-490a-ab09-add26ddb6382": {"doc_hash": "92836b0980fbf3094f4804dc18ede7f12282825b2fdbb2edf4530c4aa43b0af3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dc7db30d-7fd8-4df3-866e-da87e169ba26": {"doc_hash": "2745e10dce0578d1db851cb2589e7c71f8bdcd07f3bbb7f7e76fdfa31c6395cd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b34b9390-01c5-4c2d-8183-d8ebd67ece64": {"doc_hash": "df793aea954b8e058d55b97bb122ff74451c2439f35f6cc5163886ac08b19298", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2fa7d627-bb0d-4614-9edd-00136a8223cd": {"doc_hash": "265354bd22f10ff1612e6f6c9e216dd1f6009c63cae45cdecd2d56385721e4dd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "772f82ad-0231-4d09-b381-dc5d72d95e00": {"doc_hash": "573ab21180b3d432b6d8c38aff9b947f8fbf96dd27ee31c4e3af815ad8c27ffe", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a32bb209-d3ab-4683-ac5a-4a50097bd8fb": {"doc_hash": "09cae7bbd1fe02268f214cd09f29e6bf72ce7f74489f14888574ed327888b230", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d2dfc24e-c6f6-4b85-a15d-244cf347e221": {"doc_hash": "b93d60bb9430beb40e724f0311640c890d628b1f5b05de08a36aa63cc68a5b20", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "79ad8d9f-5a09-45a6-96df-6f80d24405bb": {"doc_hash": "5732e9a0c2cce116dafa7ebbffc2cec1e82fb53a97d7fc0698ce154c38388ef9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5677f329-585c-4409-bcaa-e3ddf3f21d72": {"doc_hash": "99f7f11ca0a033706145b09eb35bd463713e36cdbf445dd88f042bacc9bddcc2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8631d5a8-8c11-4dd5-b7c2-e3610a5e2c1d": {"doc_hash": "f2f5deedcfaec54be60a7d070ae7f362a4639dcf096b1cd73e1fd2048d24f0ff", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f9efa154-016b-4c2a-b511-5a84ec784152": {"doc_hash": "e02b9e43eedd14dc8b35b8028eb55719225213ee687bb5977d31db61e14e6163", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d41b5648-bf48-4c47-9696-1cbaaa778a5c": {"doc_hash": "1264731494ff3b332ff9d3e737ca66d75b19d6c837ba0b292729793a3c6c0957", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d54b9520-11fb-4782-93f7-3fc906b24ff1": {"doc_hash": "e8a43107db65aae2f051f262a441537212bee7f2972c44e5811e9917db7d1b41", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7aafcfc8-3cb8-47ee-ad29-1a1568015e8f": {"doc_hash": "df013d395c448a2469b425c1b81ee3dd7824aac3096d511d7a7e19606ac59db0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"6dc89e6e-7f79-4749-8ae9-16dfbc06ad5c": {"doc_hash": "3c656eb8c26a79f104aa469174b32fc60a319a4effbf29b9720ab7172cb08871", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "63ff8cd4-5ad4-4cdd-955a-fb227c0a83a6": {"doc_hash": "2e63d758b0d820fec147fb7a4e736939893b2a9be5739d712a47cd059bd8dcf9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c9566a54-0a82-4e1c-bdd0-17619158433e": {"doc_hash": "27f4b3ef7f9b781c0297f991f74c619666b13727ac680ca89a21083c5f84f37b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d7eaba30-e1fe-4de3-808d-26f7d1d2f3d6": {"doc_hash": "1cd84cbd8227297fcd887baaa3dbecca3005efb65f78f635003d04d08d2e685c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "dd6a23d6-f218-4c4e-ab2f-45a34db82c7e": {"doc_hash": "b6b90412e2167fa2015b48b0a3dd1c77edeef2bf963a57da63aebfef67443c08", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1355a4e1-6c67-44b4-9b38-6655fd46ee2d": {"doc_hash": "a405b2888b1362dcd11f2c93badd5817e12c72a3a054051b8f7b4acdb9e92332", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cb36c6b2-6823-4b87-b4d6-4d3f536b354e": {"doc_hash": "740c74cf12c91d1fb19a716080baa9779624998f07e3929f5a35dca9a339bbc3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2bf501ac-e516-4c53-85fc-fa976cc093c4": {"doc_hash": "d30d257c07c3c7924982784527c37bf707590377d3581c1d3612e950e1203522", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "77d837d8-7954-41d2-adb5-48465d6ef4cb": {"doc_hash": "76b1f17adbb181468c57e7914f6491b8bc3fd23bac9aa50d345cb61041ec058f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "63715421-8ce4-4bfe-b926-4ee480c97509": {"doc_hash": "44912f1ac8a44295833982f5504bcbd0500018f021988fce8821c0f22d2f0e5c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4027d04e-7150-45ef-bc0f-ca04bc17db98": {"doc_hash": "b6aa9db6555d30b8620211dce655ee17e5b443a6411db8cd83a331a49bccd3ac", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "194396d4-069d-4aa5-b5b9-568268f53966": {"doc_hash": "700f28fb9f11e85d8ac657d59dbf437e8f0b12f72259e5b7a73a653f13a35395", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b9a7dba5-c48f-40d1-986b-fb8ea4eeea9f": {"doc_hash": "5dc3c8492cea24344a55ce160b2c9d79d1ef03e5fcbe61e1da2910043b8a7497", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b943049b-79bf-44ea-910c-84c6830e1858": {"doc_hash": "f77c12b4c4fe937389d935662d593837447a799c05b2a53507a891dbd1974705", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "14a323dc-0a44-44be-8bc9-98a41d5b6a7f": {"doc_hash": "8bcbe11cad3df833bf7cd167edf3fc663502ce46f0bc303bea013de23e67a4f0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f65e451d-bfbb-4d35-9a0b-5b9cccba1f0d": {"doc_hash": "ff953b96b1d7e297fa17be8588ef14a10706ace9bff8ffa1993b5c774be00ebf", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "73f090ec-beef-4045-a6a2-9d91019279ab": {"doc_hash": "3e134343c3187fd488682f2826d92aab14827dbf9793a28145f140ec2cd20b48", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "60a03d25-6aff-4e41-bc3c-61ecacf1287d": {"doc_hash": "ebd240acebe334d62ab28a4c938c6957ae70ff1393964915bd3e7631a490b063", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "85814af2-e9f6-468f-a04b-5c9383455f9b": {"doc_hash": "47bddb3f87d4d336e4eecf998f16619b4744077370008a8dd16cb176f84dbe93", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e532e794-2d4e-4e9d-a45a-5283435cbe30": {"doc_hash": "d7023b56bb118dbacd284b8c2167808cbb7341673ea5592320e42361293076ee", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"bce3834a-780e-4202-824d-144f2398c763": {"doc_hash": "17022b158c9a5471199377c2df03b392fbd069781d0605b178b72961118e90ac", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "733521fb-4cdb-4355-8db3-077f451b6a1e": {"doc_hash": "97bed994b92ffb0145e9bf5872019b0b84c2ce201b06f90493c4fbb629946a9a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3792bbe8-915e-4b73-b991-62852b05a285": {"doc_hash": "ccdc2a61fe4d017dfbba0815948132108408863b47fff42bed18a7a83668d112", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c07f7f64-1137-4da6-b539-a0fba5a7e33b": {"doc_hash": "afc866d7424d4eeeee21964fd2e22614d80519d3c313032ec44a8bd1788c97be", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "375a0faf-c84b-41f9-aeed-5e7aaec40b75": {"doc_hash": "be9bcc74efc5336cae1aa6b45c958baf7255e151019c4101446e104375c5adf7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ce787730-d0c5-48b2-ad6a-569a719433be": {"doc_hash": "72cb851aa7398ed9093652d335095aacc8e443c1c19436bc66d8bae99dd8ebe3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "de251c54-1177-42fc-bd1d-7a80c0d74fcd": {"doc_hash": "e519bc989a16ec94ab71771beb8f85b4a9d1fd8e3d77adb3a3727359929579d7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "35d63551-ba33-4ebb-aec0-49f214ae7e66": {"doc_hash": "9a55d30ea51897c8b9d65cd0da1c9e6f9881b7e45623a5a682b384ac9ae00703", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "42da53ec-3b5d-4bf6-98d0-ef789de5cb84": {"doc_hash": "c1bae9c88ba5d1d7c0c2903ae65f1b0fa56389b5c737a0477d372edd359d8bdb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "47dbb703-fd1a-41ad-872a-56cdcc4be76e": {"doc_hash": "af019319cbf395818b8f445a2a7e5e52f340ede19662354817252897458f097c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "241a2d14-fa78-4dea-86e6-1bb8df72785e": {"doc_hash": "5ea68e49b5708dd3a9d60b6a6fbe61ef7e818641062d5c845f0e5f0177208633", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "72caf107-b306-44d8-9ae7-5e1e1bc21ba5": {"doc_hash": "44ea2cc57c410ddaa5dbd487f7da38a5d7c5649e91e380fafd1b385f0f425180", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1351b7b7-a4e7-4d81-a70c-cf97e97973d3": {"doc_hash": "12cc4ecb1fcebff01565b4dcc8455fa39c0f4a7faaa249d91cb2bdcbb583be66", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bf18eac3-1a40-4459-bad0-ddd3e216a85a": {"doc_hash": "333522c3918422b002ad20289fc921f332c8bb209f1b6185620aa9e8cdf9c36d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "384d8554-a949-4192-aed1-83d9b0ed4b2a": {"doc_hash": "415472b29ffd31b3ce9212114d37e2557e32cfbaab38eddd2b9faa33cd9c50b1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "778a92e3-c67a-4c2f-899a-284823717b0a": {"doc_hash": "7a6dbe2d7e123fdc58257a436486c628dd0b2f621adc6fac59e5c9e55145ce04", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9dfa46b1-ac55-4438-b46c-f0e6e4f3f8f1": {"doc_hash": "25dd1ff4e5c77ed0bb852f4a3d86896f72de38261feba601c467af39819c41df", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2f1d1648-bc76-4143-a663-c5db3f03a01f": {"doc_hash": "173cfdd1358fc5015c1f3a39e380403e186a299ec6fe1884d72f073bcc0948ba", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "37931bc3-97c6-4a26-bc5c-300b56bc7ba8": {"doc_hash": "b0fcfca6fb3893c6cb6e491d8d6bc11fd4e90db8a3ef562a45369f29842e2dce", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "29e6dd38-d85e-407d-ab21-1bf999ba448c": {"doc_hash": "bac530a48ae7c2b761dbb50a460dbb631fd434b245f672f416b5319df37ee6ad", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"a65c47d7-65b1-4b2b-b47d-6b0d8b119e84": {"doc_hash": "b479a17c0004465fccc1e1b72a8ce6a564ec2247f0dbd780cf1246611fef28b3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5e100849-6a24-4de3-b941-2efb8a7e1dcd": {"doc_hash": "9bf3a33d687c69c71a7640797485ae5fa356561977cb6bb05a32409b3dbb977f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b7cdf619-ac55-439f-a1f5-4d8f5d82f99a": {"doc_hash": "d1a678036b85bada66320879a39074bfc1556e03689b8e72379244ff99a4909d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b5f742d0-3a6f-4fb3-98be-c8b77f829063": {"doc_hash": "2d907252f0adcb5cc48abed0a1fde2ee38f303bf423c4285dbbcb7507717874c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9a7481be-eba8-48ad-9c86-ed4e3bdd1e64": {"doc_hash": "a0cda1fa5a65dd885745eaa2c9d683bc98ea2aa65607779a0d449bc16aaa4574", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4d58f025-3af0-4221-a8c5-43da05af3b58": {"doc_hash": "f87004375ec7b1027db4adb911d1eac58857329a159dd5562dd5e69ec0831a53", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fca08b82-c7d6-4d24-98f0-8411111f33b1": {"doc_hash": "5960d3b66a8f3fd418904aae9406cda565cbcb6100762edf64274dbeee427d06", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fbd4ea09-d525-434a-be61-7773af642d22": {"doc_hash": "dc5b40a5b4be7f09036d3eba5024dd30fbc213e84d960517098443a0c8e53556", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fadcd762-e251-4528-b598-7c4d8983ed21": {"doc_hash": "0fad268e879943e882c433b6c6a0a709c9250b0c38eb9b9edcb2efbdfe2a9b7e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "71960f4d-b5dc-4073-9207-da469975e416": {"doc_hash": "0f02d3c3110dbdb8d9c26cc9aaf2209f763e01147a1e4111636886c533ec2488", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f35544ea-5ea0-4105-b9f7-2ec2dad46bdb": {"doc_hash": "13bc99e89878fea32090d288e1cd7b0747c69f9e890bb7a895df42d87819c38a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c5a7c9e5-69ae-48dd-978f-e82b6defd827": {"doc_hash": "1f53884a1a46f76cffccf6192ebec3b43d5f631b626125d7d6ea60014478e146", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "34fcb400-073d-4b70-9fde-5fab73e9365e": {"doc_hash": "473ad021366158aa8a711e4501b9e4fefd20ccd30df5002f6d8713c68025786b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5a7c92e8-71cc-4887-9e9c-49d1a639b382": {"doc_hash": "7b191ef53d9a7d820598c55e541541a0a6f2d326a2d64bcd5f3cb1d47e41c91c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0b127134-e4d4-4c6c-a36f-85e8f0393045": {"doc_hash": "65ac2946d23ad786f56772267b15d0e877504e6037b0967b47e3fa28b62dd544", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a9134fcd-a411-4f98-9779-eee1e98b8b5b": {"doc_hash": "63b05326c349eda0c862aff66e9b0d6bb6d7dfbe9f35e3cb47f2318cf9d50f5b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "22e15aeb-1c90-4bec-aba7-21d322c73146": {"doc_hash": "abafd02db710e75453052af575218cc0e7fcdbc1c4c66d7edb3e0219ac0b72e1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "77e32929-73fd-43fa-a6c4-d41df96f366c": {"doc_hash": "f4b4c92ec76169ce5695dc7cc2daddb856125f094c2145d03f9d72c4268c1782", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1fcc4b26-1815-499e-b8b9-2c7b5d53871d": {"doc_hash": "59a2b046f9c1274ab927e6550066cbb485fe7495f07d4cd54be3d15413667e5b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "04467916-4416-4ccf-b050-6170335a3157": {"doc_hash": "7ac3ffb7b7a1a01abfcebf60c7d833401c57d6d5accab763c4b7246a4ddce8f8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"dcb0925a-7174-46be-b129-6ec42e64675b": {"doc_hash": "3bb470f72147609c85ae9614c7265a896af209856e14c3bc1176a12db12b96c8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "832b9eee-2266-4b1d-8b6b-64c35a4a1b90": {"doc_hash": "9ee5d5d5fcb0fc8deebbae52c15f50e76521b6c92e977d55542db3d55cb8268c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "92e31890-eadb-4919-a7db-87bed8926fb3": {"doc_hash": "5c597d2e51f7b6b4f1575ed19b94b7345fc97bc879debe5f1c20044d8fe308d2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ae96b1b5-7ba0-4ab0-9731-24197ba56570": {"doc_hash": "dc306354c04fc4cfb7ea4aa069eb56c3f34bfd08ea89fb50c4ab8dd5a3b0b8fb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9ca5eb17-b95d-40b7-8f5b-47ee10db6afa": {"doc_hash": "368fb24a24f52846b9f73d1f666d4ad652692af011185bae816e12b87299c053", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "02b11afa-ce2e-4aa2-9253-c1fe35302bd0": {"doc_hash": "2ddc776972f1f86b67d651c8c493b235f7ff265b6a22ca1d5fac566c3ccf000e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "409097e6-8d73-4a44-b2a6-56078b988ac6": {"doc_hash": "8352d050cfb7aaa8e4b4d0320b44b94fa8e5f6663b5886cbbc4f611784a0eb5d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "119e3691-4bb7-4980-b1df-bf4a55324d6c": {"doc_hash": "4fc4fa2c9df98ccda3d131f7a21974d1b2b0e43a0aa495fb4a96f1f7afcecd6c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bb24e12a-242f-46e0-af3b-8fdf8d5758fe": {"doc_hash": "92b10e70808ab5f7a12a3483588bc9561c5ec1cd6d51c2941dc4dfce06a10e56", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "936b39c1-b714-4718-961b-53dc7797d09d": {"doc_hash": "4771933b5b1ff3722859447f17b6147e4e8e0aecce7603f2e3b3500bce6d3f47", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "17b9cc56-82a1-4841-83ee-5dce2aa40635": {"doc_hash": "42a50d97ee5f5b48b71a58aa8ab5f6937e08c2cfcd237da97983a161d3c93f06", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "31b01bf0-3307-492c-afe0-7229c3d85fa3": {"doc_hash": "0ca4687bbe7d588facfd6bb206dd8da8c064f4651a16958be18d499ce96638bb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8b2f129e-4481-44b9-adf7-fb2720e09ced": {"doc_hash": "10b46d83e7c37cc2eed6c483738771052e4bcfe5fa09d0d177ba81bf881f2dbf", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "24859b29-a3ea-42c7-9577-f3e5f708c844": {"doc_hash": "02de2fb0fbeacf9567bb60c41a19c14fbc2e4b0cbfcc79b5c70f6453f45fd813", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "36570aa1-8771-4b18-b9d2-0c7d42b87ccb": {"doc_hash": "cc9d15ac04d44669197d3661d0d0433364b78c8b7f9252186614cb1700f1e0f2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e4914e87-fae5-421e-ad75-3c21a73a6665": {"doc_hash": "68b03d1e00486e260420e8155b3299984ed3222cea240d2024c36427fb9bacdd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "abe8e5cb-f99a-4c55-97a2-3f4a6080beb8": {"doc_hash": "5c596ea8db4d28696d13b588afd99f92c9b16cc5dd71b0da453fa232d20408c7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "25b4d4bd-c952-4e40-9eed-af7eadd747d9": {"doc_hash": "41a838c1ba1a9ae03d4453de99f3960d80f5982b2e04d392f7cf5d7b258b6271", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0aecc149-6458-4984-89c2-daab8a9a2d18": {"doc_hash": "a959ead689b0a32ad437adc40ab7f7d2521a33afefe3dbb1972e6f02cac5cbff", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ba5811fb-4ad0-46ec-bed7-9aa0491a71b8": {"doc_hash": "d9cc8208d9908fe306d7207d33277e74de6babb3e3b451a41924234771a6b457", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"f131a84a-7b7f-4220-87a6-b71617cacfb5": {"doc_hash": "c9f060287b1f0c97981d7845259200bf120cc6d7d4354365a1a695f6cff14321", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c50d2dbc-59bf-4da4-9705-07120474cd09": {"doc_hash": "9d71c5bcd0bb00d7807aa8259eed5ef118ce722a27ebfe251df5af03e43f451d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "21b11587-dd3a-4cc2-a120-5d92c953e2d8": {"doc_hash": "bf0de52ce70907f4ed0f57605624ed32bca93a28cc24be8249e5932b8bbe5bfc", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0008d989-47ce-41a5-8ea1-d07bd161ac55": {"doc_hash": "94e11e7a5ee4c0c83e68fc818e4345682a2afd183104f96a906cd1f3abff414e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7d6fec03-c63b-4db5-874e-7f2b0e53824b": {"doc_hash": "77cf8aadbaa8d982c20838547cb5ae7a4c4772adf60d00995d3fa20712a438c6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7b38d1db-a8dd-49d2-9e4a-5410e0462a60": {"doc_hash": "8b4ff53307c0743588fd1c922de9d481596e80e41271720df1ce03743281a16b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ece62c27-00c5-4907-8204-44f42aa4f83c": {"doc_hash": "ca85de52801f2e2a382f4cea60fa78e7f8b4b00dd355a9b5f8e0b2b2622145cb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4191c316-bd5f-47aa-a4da-28ec81d7de88": {"doc_hash": "cf9eed25d253ff936b09faf6352b60d5f67777f1f81e6593b710a3d71d4aa920", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7eb0f7f7-7af8-45b8-b316-74589a8376a1": {"doc_hash": "146b741479267b88ec7ce46012789ab43ee92b28eb7af4e8e081972e94e3a5c8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2e2b91d9-0b45-4dbc-a9b0-b507a2acec02": {"doc_hash": "e4834cae0657ee853f4a9645d2cfdaab5966ad83ab96c700fdefbb648f8be48b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "cb37433a-bb55-47c9-8696-889f36d9a188": {"doc_hash": "27c817c5fdb8a774f423ae56c100d5c0104bd2b7cbb621a662e91eb8745cdd47", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9397ae15-7e37-4d6b-8e7e-2c32ee4a1805": {"doc_hash": "7e8d823a43a73d4b3a481253f809b6ee3d48947ab2289d6f0e071a0d77fa0fc9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a35de6b2-34ee-4b8e-a838-d2bffb62bf27": {"doc_hash": "66e4231f1085883887bc4bf8b0e654ddd509ad110444076defbb2d0a1ed54271", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a363da84-0efa-4250-8956-07f98d870449": {"doc_hash": "3ec3dd03d4e0862db995fa6ab4ef9f3c7de8cfc5076d1704ea40a376bc79c806", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9cf97173-98a8-417e-a319-eae2ab22fce8": {"doc_hash": "e4b24106d707dbd5dff7b368f32aa7be29c62c6f663fbdc031e2fd76979d0590", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8a6dd772-4525-4a20-87d2-c9e7ffd920d0": {"doc_hash": "b224fde266a4740089ccb4a0f5191f9b0fa9197933826fee32c4a4aeebfae290", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "78a7aef0-2ea7-4f55-83f0-dd1319f282d5": {"doc_hash": "cd9f006b816818881086d1d6e5043b409fbbd2887aea43597af49550bf6b4f51", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4a0d157c-3379-4047-96e9-4a52fb403ac1": {"doc_hash": "3b17e30cd7125f24fcd76ae892a57d5eaf536e77b68925ee1e6046e842b47af0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "13dded4f-225a-4a43-a4ea-b552be510614": {"doc_hash": "1168d97081d2d51c6b98b2beb9d4ba1e2fdde072dadf6e3fb2199700fed71559", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8bcf0b40-86c8-43f8-8165-8350447e3692": {"doc_hash": "7fd20316320f70194c9b0bedf936fc00422653f666c569a96d9172b017740fb2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"59331595-90e9-48ea-93c5-4dae43721d24": {"doc_hash": "0693d55e6d4667bd26b69aa5568fd599f8ae19a27c02b2899404d07398cf133d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6b96aa91-3327-4d94-a271-155962b1cc7f": {"doc_hash": "9b63c7326d322198859bf3254c734290a358b79ccd1c922abd46ccaac2733e87", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d6c90984-f636-4d04-a29a-c6324e0bf2ae": {"doc_hash": "c498ba3a4d23571f534965da6fb07c3552c68fecafeeb7eb6c4b0e92ca3c408f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "38cdb584-5486-4857-b69d-0bd473e6bbee": {"doc_hash": "1ebe7a1300a17051fc888b5a1da99ebef5ed1650b4809a6b8e7559628a36f2ca", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "90b08268-8e7f-4f3f-bbba-37a3309c088d": {"doc_hash": "974deeea697d994cfdf9876e70343f5567cb5a40ab32683929594770c6688446", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f357aec5-6b19-4d4b-a33c-7af27d7a9c0e": {"doc_hash": "664da35da316036f270faf541ae6a29625bb699e4f62b53a624ca9f337906194", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5ccce43f-a9bf-45e0-9e87-1f22e8c9b2ef": {"doc_hash": "d74d18af506a7b4fa2be9cdc1f2482f09d06cfdb3ede4e141e923b819f53ef09", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f97c811a-03ec-418b-8eb3-e24161f195f2": {"doc_hash": "31317f6f234053aea3724269158d85e61470ea0a2114c682f7a3984a8993cf08", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "57f0902e-649d-43b6-a41b-d6031f2a572e": {"doc_hash": "5879915ef616fe841d0bd5d2a2f4b1a2db46afa7300221adeed064e1b9c5632e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c5589445-716f-48e7-b5f4-e50f81a8b5e8": {"doc_hash": "a4e28421fef963894a3819b7827c5fe14936dbb16d4ec4a42337205285717724", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9edfebce-cda2-44e7-bf93-22f1471af47f": {"doc_hash": "a948e99b6aae1535c32fd68f7605db527526a8d13b904e605647e46f1599df88", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "97aa53d8-3958-4d51-9b01-3aec6eb1b4a1": {"doc_hash": "c7fb5f854726ab102754e2bc58293ff1f7135b828cc1434be4cbebb868f7fed6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "438f8dc5-53df-49e4-9ee8-b1ef9b6a129f": {"doc_hash": "9f3a17f1a23d612c8b768951cb648baac4ac3fdb4b63527df832a400d630ce9e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e1252243-36f1-48a2-82b5-2f450c0eca67": {"doc_hash": "7e7aab0b7b90cd579df30cc88a9c1f19bfe0d0f362c4ca200d708ded86edffb4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "65c2052a-2aca-462e-a47c-79262bbc21c6": {"doc_hash": "62ec895b0eb7fabcd236b391005409288c34c5495ec26ae8966aa5f56629e455", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a540e60e-c64b-4a8d-a490-41870f386ce6": {"doc_hash": "33fc36fed362a6a9371e8b2676bf530cac28cf7c3c60a94635b4a9798db88d65", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a6a97211-221b-49c5-88b6-2c01f313ca02": {"doc_hash": "3cd4601de016eb12b9ccba3e625d1a107f730edc756a23ea36bdc0199dace125", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "808d1512-1f22-4294-954d-15dce378b092": {"doc_hash": "4b910735ca73e4b431e6052dcefee32d700a647fd31b91066697494b122db290", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "aba2add0-9fc9-4cca-914e-7a03af84e852": {"doc_hash": "31d1aa4452c37394a14e40135e6dc601ff2e0b7dbd50fa20878fd7af0183da02", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3ed751a7-875c-4061-a51e-39ed7a1a63b4": {"doc_hash": "440200f51663c7235ac0798bbddce43edf684cd7ed51a1fb0939297491ab3ade", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"c902ecc8-41f1-4a31-ad05-0de98e694a77": {"doc_hash": "f4416b101c20cfbc1af7b252bce34953d3a6d35893ae28db1be35faf8f823140", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "066a6326-763f-4494-a458-53350b36806f": {"doc_hash": "5946b43a48df1643bbf47655dcee5a598c081d72ad360f9268636baafc1911f9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "934bf8f0-f438-4364-8583-f6f3bd4bc27c": {"doc_hash": "675b066129a91062e30b075a8adffa31a75cb7f382d71cf5d8bb15078bae430b", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0470d06d-df1d-4c0f-865b-64b7c566502b": {"doc_hash": "69fcb4f98688940116470e8e6e869943a6e613fc32983e4eecf057306d828973", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8f59cd69-4b89-4d65-a5f9-c158df2b2562": {"doc_hash": "eeb2e3a9ff08fcb8997be16ef5418c6b655a3ba4502051a501e103cc3cdc65ce", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e9c5f531-af7b-4a41-b0a8-f13cf8ed4034": {"doc_hash": "13c996212621f47e0f6044a2514f74332c0a5616cdc875057ce2c831eff6d5d4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "38896ebb-e8a5-48c5-95a8-ec0c73e4e43e": {"doc_hash": "cf851195edb1b984b6feea7356f6df6b1be1f4b8170cf5bcdbad806c00e8bc74", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "98785b88-b462-4f3d-9689-13c8524b4738": {"doc_hash": "bfec69013c5d078c4e29d5a31574378acda30d9b964af12f0115e28cd32b2576", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bb8c9d31-dd2e-4f50-8295-467ed70ae70a": {"doc_hash": "b1353069f94b8dfa3c840b0b30607efc9945bac7edbf5f1482f2df523f9f788d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7e95e5e0-aed3-4fc4-909d-b17407e921b8": {"doc_hash": "ba18f74a39f9788a64c7538292911fb0e0c2cc6c8742aed0e0d4b783c2355ec0", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e63b6c3c-b796-45c5-a3cc-c297ee6ebe19": {"doc_hash": "475f5162211a65e002a737eb116383088f917d019fd94808ad7f04967f6657e5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e0975843-1213-4bb8-b0f7-89433f6505f8": {"doc_hash": "d4b029ccc01060d9a3cd939ceb3e02ec6cea23fe4072927ad79037498f486643", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bea604cd-e1e7-4cfd-a819-e8b4c124fa80": {"doc_hash": "0fdcc99525117298d8d7d80c22a481e8921632b7d7521642526f7ca2629ebe57", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9165083b-e56a-434c-9346-206b247c42ff": {"doc_hash": "ed6f130f4e579619a8ddb9bfe5acc7cb2751f55840f9b74fb9dacd3ecf61c350", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "db0abb44-02b1-4900-9cea-30cb8a34eb30": {"doc_hash": "b5c93ce26f3d63ac58b5be1c55c1a765ea226af322a92253ce55dbbe24bb660f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6ad3bf0f-bcab-4da1-af98-0656886f010f": {"doc_hash": "f78cd96ecb472a2531c378af525530ba43b7b1339025cfae93148a912b538ad7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "50c532d3-384e-4328-9afc-5dcd8cf84de1": {"doc_hash": "f524a92ed7d5c0d060d30d81b319eb441f306758b6170402fee2e57b65ea6b71", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "76785645-0ba2-4bdf-8141-ecfa99b51dc3": {"doc_hash": "a4a719f5340d492aac09d015aa8a06619baf9cfac8b055a538cd5e43a76b0c28", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "55be0b16-6a7a-424d-a068-f34d04f82894": {"doc_hash": "8644b1a9e52765a6fd9e92f0e347a3e4d1672694e3625c40d4011f9c4f14a8f7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e2afaa55-5537-4763-a075-920b698f586a": {"doc_hash": "62e4c3bc3459c8ce19ac105d76c148ba583d6af98bb4e9788e40a89648f95b2d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"c3663e02-759a-4398-a62f-06e0bb4b213b": {"doc_hash": "b40454f9c16d3862e9967fd49c8e745bd0cdfa54a45f7d9a33c388f317924d2a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4b39663f-c96f-4aec-9e40-a291061d8137": {"doc_hash": "134143563f23012f8bbe1ccc6f4a149fa273818ba3e69d043e52024e52359461", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e0a97f98-df2a-43a3-a15a-243ec97e6d21": {"doc_hash": "d86f7e8738ef700e63ef713f1f5e717ee89d8b87b70e259a67860a2110303ed9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "42563b7f-cbcd-4519-82d7-ca67adad7b27": {"doc_hash": "7fe18813d9afbda7d13989dcdfd42dbb37265d7d79b112e6d6d0b7289ef7a8eb", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2bcf57ea-ff46-42d1-ad9d-99deacf38369": {"doc_hash": "031507077d1ebe8c5010c208e8ed2b4ab691414704c0798d08bf414baaa44843", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "10e9d4ea-46ce-484d-949d-0b8ba30a7c43": {"doc_hash": "582c064369b9c7858eb698a69db351ff5d68ab4e86b086767fd514c95e6e95c8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fad2e5e6-9a2c-4936-96aa-85e293c2ded1": {"doc_hash": "972d3c08dddd008bdb7864542fb1b8d33bd5427ffa524fbacc2e95d894ece198", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "81918138-c432-473d-b792-3f072be98160": {"doc_hash": "cc0d344c3117bd780d9c98ace12185fb2b9c103e577e1e8e439a7b010ac301cd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2596d581-ed4a-400e-8142-70ce63a64ae7": {"doc_hash": "34da2b3ee35d2c2590fcd2b9a86468f4e0ad23b9e41d698d04db055aaad72797", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "42f0b20d-734a-4f63-866b-b6ecd606d953": {"doc_hash": "f64e853b7fcb8a35f93a8d0e15339a4c57ef860ed3f09e2d6b0f9718a5fd5bf5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "460a4973-530a-436e-9f64-ac43ac154551": {"doc_hash": "59304967a286501d63348ab10116e8ed1a5b27f5d48d066555f43574b32f63fd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5f534f88-e5ad-49e1-bd89-b799513f4c00": {"doc_hash": "f405ac6e7a9e571de2baa511141a5a5581eefdf9bf52232000bf9f472055460e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1e602cba-2b97-4c95-adf8-23e950e42dd9": {"doc_hash": "485cf4ccb3638e139ef026d620f5a5992a2dd14d974c3e6fc88afbed7cec72dc", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9c397935-cbdb-4af5-ae96-623262270cf3": {"doc_hash": "d9a3f2b0ae245a5cda9ab8b6a14875ce5a49c7bfa2925214286c05078cb0d90c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "25dfca0d-0b15-426d-9106-1fa2a9fa6e1a": {"doc_hash": "179f521753d66bb5f16b4e68339679f9bc1b63b9608ebe835fdc7317765fae85", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3a1f4ce3-1c43-4e97-8d11-fa85315fd754": {"doc_hash": "399e0db4f12cee25346e3efd40d5712d2cc357ad2faf2c4e913d211c521ef7a7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "54ffc58a-716e-4ca2-a1fe-8c09ae37222b": {"doc_hash": "fc3949213a87e3c3dd333e2eb49154b9d1eb0a8db0d7b1687c4b95d91130a2e8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "868606b4-3614-4597-be8f-c25b3da8b6f0": {"doc_hash": "06b4f6cb0b227613e3984febfc9d56572e87e7378d6a106d80923c00a0905700", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4bda94d3-f684-49ae-8fa7-10ffb5ecfe8a": {"doc_hash": "73fd2625b325a9c0152758ebd37361357f2b96edd90a8f25ff90d24f4c061613", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "84475cf5-b042-4415-9527-eb8121b9ea99": {"doc_hash": "404bc60db1ee62eda6692110fb1d11e6b8ba8c0ab60a1b16925c4245c058f44d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"c6621a32-a894-4955-9a4d-755481e70816": {"doc_hash": "935fcb797ba0b804cebd6473f39d444c4d30874ab5cea7ed78f9d2ea142daa82", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "85784adb-42aa-4c09-8217-5f1fc3a0923b": {"doc_hash": "22ca8e3a4760165b34b5b8e8659da39a2e5dde54c0b20a887fe00669c6df5405", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "93dbb6bf-40a7-4ffc-9e39-62061ae9274c": {"doc_hash": "5d1f0bafc469ec0ff8c0c993c6f45d190d8038246511c57835b0ea1f1206fd4e", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "42c5c024-e12f-4a21-9ce0-919a80fe387a": {"doc_hash": "e83930d1c0270fe8bfbc36b266f0054ed29609f354cca28e167226d1eea0b0fd", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "24f57b00-4dfa-437a-8b73-2bfc6ae90afa": {"doc_hash": "88d9e3c2e586da2b0889cbff5ed39ad0ecfc24cec8915ad54da4e06b8224adde", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "05271871-334c-43b9-99ee-23accab4be00": {"doc_hash": "d145c9dcd579669dce9d19bb2f6897f10cbb5c1a783501349cabd938c330201c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "845821ef-55a0-447a-983c-e931e9eadc39": {"doc_hash": "bf3f647c5cec3768f6e6506dbfd35999dc3b051ef49107816f61d2c2a3b3563f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "17f505de-eba1-4fc0-9d20-a5db5f1f3437": {"doc_hash": "7c52cb2a49e76c9fc81c32c385d3278b5a40eb1ba0bb09b67f490c61a1dc5537", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f0af8857-c8d0-4783-9e30-b3dc17316850": {"doc_hash": "fdef1c2dc45915a7a88bce43a1e3f144253933536f10e053fbde5d851ea60795", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8fc3e581-2e27-4273-9f62-b610581d35c8": {"doc_hash": "682daa2c8d25b27087a17eec536ce1b190e44c331d1b1b8bd4ccea2ee7674e78", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "bfd51746-8f4f-4e5a-9cb9-1891f4612a37": {"doc_hash": "5781d43a478be4b62aff279bc151a2bc72893bd390a8eaccd265aa3ae249f6b9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "63a946f5-614e-4f0b-a3d9-4d7eba44d239": {"doc_hash": "f059ee0698ba6ce894ac3f869a1b18798ec085e61d8caf7b1845d4a4e1e3b9fc", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fb0eaa3c-59a6-4896-b0f1-b139b803fc82": {"doc_hash": "f65dbe242d5dcba40c986639de0cbf5e8244ccea52c2d2ef8e9981d6d1cd5b56", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8fd2084a-81e6-4942-a108-fd4194781dd3": {"doc_hash": "585c8ce186744f3b522408ef8d88432d392f52762559658e3f23238270f6f277", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "79e1410d-f31a-4a94-ad26-af8fe015f2f0": {"doc_hash": "23031f366bef9406c66e93dc9f23567a6ce46c340ada0861c04be9d6a6c65471", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b50a540f-c634-480e-aa83-fc30b1fd5f25": {"doc_hash": "ebdc63cbb9ecaa118770dfb455605eb5afbdbb5926befa30259479b783754d0a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "4d8265b9-81d0-456b-a1f0-53e01195baea": {"doc_hash": "c75b539a0f7e2898571c14cd05edf4aabbe0b2f8bae68abf1d3318a7564f79ba", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "86d9973f-21b0-4ebf-b925-02dad85aa679": {"doc_hash": "0c016d539e7bbc5dcce3ae507a7e675ef54976abc888682db500714059a66442", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "1440f2be-e94f-4f2c-a37b-67fd11eb46de": {"doc_hash": "bedfeb4d6e129deac08ba5e0d8d1ca5dfe03f6496d990206a53eb274c3a5a3fe", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8090ba14-7f38-4daf-b862-09e90de0e046": {"doc_hash": "f5906f72d6361b358e02350aa2e53fef81460eb0702640cc68c4617539118980", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"c0d4cb8b-7723-4251-8957-2d8d906cbc46": {"doc_hash": "dc29ce5714fc9e58faa6032a4d55b9aa2d96080d492f241a0543fb972234dec2", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6d802a6d-3f8e-4591-8fda-adc517463116": {"doc_hash": "65b368c8396fc4f1b28f50aae7b74b129cd62964b86a490cca9dcdd00a267b9d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6b9e9e20-907c-4eea-a6f7-db8536fafdb7": {"doc_hash": "53d4c087eecdaa04b3be1c3fa306206581c5dd9b13848b2e17827b3380f42ee6", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "892c0e72-d7d4-43a7-b673-7ca4b14dbe2a": {"doc_hash": "3d8621257b1162d1464341adf723228f498c866a6d9d2ee4e65ba133c3e528ee", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a4b48e49-421d-4f99-a9bb-a6b1513b5a86": {"doc_hash": "9324c653416f86fcf8352649db44dd94b859d47f8724615b1957fe6315bcbcad", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6bd47f03-3543-4d87-9eca-4c146c8476d5": {"doc_hash": "797fecd90a349326e891a77c165d58513bf41262b1a111e9163067166281d8a5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0c6d92c2-9c60-4670-805c-5ab17c5de6b7": {"doc_hash": "a491321023c025a1eac57f3a431f8de87ff4006c14c65f589229c7c6c09073b8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "ec17855c-fbc3-4722-bdb6-df7f56be2c99": {"doc_hash": "767ed6106b896a70fd9b743fcd3589fb9a3f4a8a1457f9df45563f974309e98c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "054268bf-050f-4f8f-8d16-d655fa7ebae5": {"doc_hash": "bbe8cc161c5cd9824f960c143a38c0bc7020c40947d6c25f334c69bc5dc5c254", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9d4996d3-8df9-413c-b554-a4c3b8670281": {"doc_hash": "6476095c4e6adc65862479f2f4fa51359bf5fcfdef518e9bf22e7e840d871d7c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "64e63e70-92ef-4fd8-93c0-1a3442e48122": {"doc_hash": "9910eb717d598c4584b1757ce9ff9ca8f0e494ed944ad89b61aa49f4ea3631e3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "8bbae6c9-cec0-4940-96d2-de9dfeea7913": {"doc_hash": "e7d0e6262170d9307a39df6da852e31f7f8db66dcdd46803f3baa8b0d92bbe22", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "c574c077-3358-4a46-afeb-89a7034053a8": {"doc_hash": "61bf1742c937b71d6b9e3c9c814d2163e0160a09efc6a7a5a16b7f1201a37419", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9a3ff54e-1669-4043-8d76-a7171553b662": {"doc_hash": "e9be3891daa20dcdf4b2d5a38fec688b4c3d390085cfeda6f8dd3b0235dd6e0c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2d101f06-5fdb-412d-bb11-284b477b526d": {"doc_hash": "2312b2d4952322255590e590a9e78ce50652951ddcb9b2ed270c1553069b86fa", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "f5eede01-7991-4960-b26e-abb4b6112057": {"doc_hash": "87fb88e87622337ec179f96d18770d57dc4a90e43c45cd51c94c1e515ec373a7", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d2128023-5b6f-4293-87d9-f9b6ce300d82": {"doc_hash": "0364bab05e822fdff3ffd5703dc3a4a88b99c6e9951050afeff91b63a3f39297", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "6c01809d-808d-4575-b9e4-5df076d1b87f": {"doc_hash": "6c4533e86bd95142964ce1e58e59e5e1cacf0a14dffc22d241cfed91e3396526", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "77b8d862-2e2b-416f-a1ef-61c02e3981ba": {"doc_hash": "1c0d8542a48dd259181d0dad4f2922302f8e1c10f670ca180acab7ee137f1625", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "55bb919e-075f-47f2-930b-44b9f987b2e6": {"doc_hash": "995e5396921afc9f0608bb8bf317194ac4856dbf7e4fd9a8d19e165a67895304", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"5c7717d8-3424-44f0-8f14-a1b33d61052f": {"doc_hash": "c44317e46d1bcfa1d998181e4abb4d3b0b1474216e4fde3aa95fe076c961830c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "da6b6896-c646-42c6-ac16-224663617bde": {"doc_hash": "6476e0ea9ddb4405e93739944500f50f25e7ad46a9a8797f0dc495eddb65710a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "30afc2d8-2758-449e-88d5-79a729e8b528": {"doc_hash": "943754cd45c2408a7fd99e06aed18f67b7e9dc8661ea577e600ae1ddea56f002", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "fbd270ed-190a-487e-9aa5-e16107ea5572": {"doc_hash": "6951122c3507756188d27354b29e2e0eaaeabec5fac3c891dc64a186d2cb82f1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "66f42a79-e531-48d2-839a-be0e2e64c06d": {"doc_hash": "78c3f984544a414c9a734a80aef695bf1338e71747d944efdd549b3cdc0a0cbe", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "a12b77d7-1761-4301-a1e0-6e8426f7426f": {"doc_hash": "94d8b26623210b3a7d05a9ebd0223a08971fa2026a2454c60c90fd37f5de1341", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d49eb40a-2874-42a9-b371-c80448c30d6e": {"doc_hash": "946b04885e469677b0e2d59bfa334d49b30902f20419ebb63fe3daf5683507f8", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9d9f5dee-f7cc-411d-99cc-c8342fd0e285": {"doc_hash": "2f785f3a5ac067f437536636a6c82f22cc48f8c3b87b0a136f1fb8b1b4bdae8f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "830126df-bbee-4e7f-931d-23d2e7f81a64": {"doc_hash": "83dde5e1b615d2d05b15a3c4816828c36a704d09022473b0fd19636e60112b2a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "66e1ea73-e973-43e7-8cc6-b1a147c72a15": {"doc_hash": "226af9ab9e1906a17b93688d9345d7b3a0bab903ede7ad12cbd68309440045b9", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "66d3c1cc-acaf-4bbc-b4f2-c7817a03d7da": {"doc_hash": "a63fd536b301ab44338017fe380c44325c75e812aaba55036fb2b077a3629390", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "3111c0d1-1abb-4b24-b802-42bec515d202": {"doc_hash": "50d46c5a5db608e250005682639a5c99a561ff001bde1c0162f02a5952af1ed3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "b29643f4-3f54-4505-aa74-e439bfb13dff": {"doc_hash": "29b8502144c168bd1c7eb86c4fb08b0991bd8c30d5efa7b2492b70037277987c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "50bffb3f-4038-4968-8849-57f4fb393859": {"doc_hash": "81f728cf4475c2088f614289a4d3118872b1fd4140b25215e5a7c38ff68cc8ea", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "5c8d75b1-16ec-43cc-bf6a-66f7ff571d77": {"doc_hash": "b16bc3f6f45daea6ec5e04061215c3675f9338aa6739bea7bb084ff2358a186c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0f14e350-6445-4b5f-a34f-55f1535b0a61": {"doc_hash": "eeb39fa196153e857370c38936765ece792a44c894179158d2f232895c6e6e37", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9eeb9fe8-f453-4370-b6f3-34bf5ae9ddee": {"doc_hash": "2eb0d89705e16b3d40b509453eb0c2e5ca72c9dedfb19a813f9e980bd59e152d", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "15efe044-f628-44a2-9ebc-edd428b40cd3": {"doc_hash": "aac2ce2d27a37eb7b3f04a88a215cd73dfcc680c6a83094f3cdf355120b8388a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9486a348-5e69-4f3b-bdd6-9d03f4e3c688": {"doc_hash": "38335e7b1088191fc1d880da0d3e01c07baea3ab1b71660f1552bc5f1f2037e3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0ca63949-82e1-4217-bd3f-6e5bb73e27fd": {"doc_hash": "894c4cd6eef12d75c016ea40efe9a23390800a12edb8d80efecfe96e5b99152a", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, 
"e2c38443-be9a-4f29-8991-640546120884": {"doc_hash": "1beb605f8ab4cf0bbbc574fb83d5fde41ff129c19415c9726c3df48e425f7f6c", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "427a174b-7c18-44cd-8b6a-8a994bc62455": {"doc_hash": "6e78d86d5e773508e9b5b76b78ea3f0ad0f353744dafda66cc921fa8d17a5ef4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "e241febe-6412-48c5-8a16-5e44d034c987": {"doc_hash": "bd170b2f5827830670629176b273243d906980f3d80f7e491e765d7843e92553", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "436ad187-cf6a-4df3-8999-fe7889f88800": {"doc_hash": "bb36842f01ab57ce8616c2ca5d76efe229c7afb275b7ec6bb89067488974bff5", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d83bf9c5-91f3-4873-bd79-797f72f16f13": {"doc_hash": "ae83ec91b5289353228cd780455e0b6ce3bb54f8683bd6d7764af4a8881030d3", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "0cf124c4-29d6-4f97-8a63-012168912766": {"doc_hash": "38c14ca19d658f5c2c1961a619e5a0a03c5ca22ae3e8b92391c04f888510c143", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "36d0f6d8-2f4f-4809-ab3a-bdab1fdb215c": {"doc_hash": "f0cbad1a7888b0fe03b8064532e120078595eb7d2df267601b4b7cf4a7176282", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "aa12d7ec-b1ef-4b86-83e2-74e52e6daddf": {"doc_hash": "8f94838941ddd6b2e41a3db74bf4b7d74bf3f824fa2dcc53327d0ceed7f60b31", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "d385fdd2-c297-4c6c-ad3e-72160865aca6": {"doc_hash": "6e569bd58a5f0b9ed0f7b96106e8f77d4d6d57b97a938addd33f6013a86008e1", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "2ea28564-d0f1-4c23-a5c8-59e6384fdbac": {"doc_hash": "7202705a798979deed64a9341763c8210c5da8a5c33df54707a30e4a606630a4", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "7620fdfe-7417-4199-94f6-2cda47a57c32": {"doc_hash": "37d648d0f5f6c94e179bfcdf7ee6f75f85c0dc555b83a023dd7f1c0119864a65", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}, "9f92600d-e49e-488b-89da-ae0f1e80d943": {"doc_hash": "02399516160020f38beeae50018994b2d1e87c6a904be3041a98436d419a237f", "ref_doc_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef"}}, "docstore/data": {"aabd4e55-6fdd-4c8b-a0d3-a15f80ae25a2": {"__data__": {"id_": "aabd4e55-6fdd-4c8b-a0d3-a15f80ae25a2", "embedding": null, "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . ", "original_text": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bab022a3-166f-40d5-878c-8884ec28a68e", "node_type": "1", "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . ", "original_text": "Devis Tuia (EPFL)\nProf. "}, "hash": "caebfb40e4970fe9277e1ed1ef212480ec1fc80c5a95e570cb814f799273793d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. ", "mimetype": "text/plain", "start_char_idx": 0, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bab022a3-166f-40d5-878c-8884ec28a68e": {"__data__": {"id_": "bab022a3-166f-40d5-878c-8884ec28a68e", "embedding": null, "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . ", "original_text": "Devis Tuia (EPFL)\nProf. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "aabd4e55-6fdd-4c8b-a0d3-a15f80ae25a2", "node_type": "1", "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . ", "original_text": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. 
"}, "hash": "8eec7b6ea451ee7edf801e3d54f6fa03097989b495d09ab39dba98e7d23e00c2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "67c22b47-4006-4279-84ac-8ed8a95033c3", "node_type": "1", "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . ", "original_text": "David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . "}, "hash": "294d0478472d92e69b8bee2a1d731be07228efdba6016e529e19c898eaa34392", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Devis Tuia (EPFL)\nProf. ", "mimetype": "text/plain", "start_char_idx": 229, "end_char_idx": 253, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "67c22b47-4006-4279-84ac-8ed8a95033c3": {"__data__": {"id_": "67c22b47-4006-4279-84ac-8ed8a95033c3", "embedding": null, "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . ", "original_text": "David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bab022a3-166f-40d5-878c-8884ec28a68e", "node_type": "1", "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . ", "original_text": "Devis Tuia (EPFL)\nProf. "}, "hash": "caebfb40e4970fe9277e1ed1ef212480ec1fc80c5a95e570cb814f799273793d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f031e3b4-2b2c-4f36-994a-c6061ff9dfa9", "node_type": "1", "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . ", "original_text": ". 
"}, "hash": "00baca395070115343c3e7fe063eb3349608f025b233a4b7fd8571803261dd22", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . ", "mimetype": "text/plain", "start_char_idx": 253, "end_char_idx": 360, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f031e3b4-2b2c-4f36-994a-c6061ff9dfa9": {"__data__": {"id_": "f031e3b4-2b2c-4f36-994a-c6061ff9dfa9", "embedding": null, "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "67c22b47-4006-4279-84ac-8ed8a95033c3", "node_type": "1", "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . ", "original_text": "David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . "}, "hash": "294d0478472d92e69b8bee2a1d731be07228efdba6016e529e19c898eaa34392", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cccfb5cd-b1bf-48c4-bb6e-768bcc0e2a35", "node_type": "1", "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . . ", "original_text": ". "}, "hash": "a8e8e96bc810bc6e5f775379cf4c4cf9536bf41d4a41bd2c4e96375ffc279bbb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cccfb5cd-b1bf-48c4-bb6e-768bcc0e2a35": {"__data__": {"id_": "cccfb5cd-b1bf-48c4-bb6e-768bcc0e2a35", "embedding": null, "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. 
David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f031e3b4-2b2c-4f36-994a-c6061ff9dfa9", "node_type": "1", "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . ", "original_text": ". "}, "hash": "00baca395070115343c3e7fe063eb3349608f025b233a4b7fd8571803261dd22", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dd0fddcb-cf59-4229-9761-07fe375dcf93", "node_type": "1", "metadata": {"window": "Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . . . ", "original_text": ". "}, "hash": "499139b516407ffb933efbadf227589d08c1cc5cd1073ba6f24bf589ef365a7e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dd0fddcb-cf59-4229-9761-07fe375dcf93": {"__data__": {"id_": "dd0fddcb-cf59-4229-9761-07fe375dcf93", "embedding": null, "metadata": {"window": "Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cccfb5cd-b1bf-48c4-bb6e-768bcc0e2a35", "node_type": "1", "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . . ", "original_text": ". "}, "hash": "a8e8e96bc810bc6e5f775379cf4c4cf9536bf41d4a41bd2c4e96375ffc279bbb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "978fa62b-6aa1-406e-9b9f-aeeae892855e", "node_type": "1", "metadata": {"window": "David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . . . . ", "original_text": ". "}, "hash": "437045979365835b84692fce686a7c1128c7587e4b85cd83924a947dde3be031", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "978fa62b-6aa1-406e-9b9f-aeeae892855e": {"__data__": {"id_": "978fa62b-6aa1-406e-9b9f-aeeae892855e", "embedding": null, "metadata": {"window": "David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dd0fddcb-cf59-4229-9761-07fe375dcf93", "node_type": "1", "metadata": {"window": "Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . . . ", "original_text": ". "}, "hash": "499139b516407ffb933efbadf227589d08c1cc5cd1073ba6f24bf589ef365a7e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a4081bcd-845c-4af3-8367-4566f59e84ea", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a4081bcd-845c-4af3-8367-4566f59e84ea": {"__data__": {"id_": "a4081bcd-845c-4af3-8367-4566f59e84ea", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "978fa62b-6aa1-406e-9b9f-aeeae892855e", "node_type": "1", "metadata": {"window": "David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . . . . . . . ", "original_text": ". "}, "hash": "437045979365835b84692fce686a7c1128c7587e4b85cd83924a947dde3be031", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2bcb37eb-15d9-45b0-90a4-9839a4e03171", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2bcb37eb-15d9-45b0-90a4-9839a4e03171": {"__data__": {"id_": "2bcb37eb-15d9-45b0-90a4-9839a4e03171", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a4081bcd-845c-4af3-8367-4566f59e84ea", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d0cd9d4f-ac2b-4369-b4fb-2364d6f6443f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d0cd9d4f-ac2b-4369-b4fb-2364d6f6443f": {"__data__": {"id_": "d0cd9d4f-ac2b-4369-b4fb-2364d6f6443f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2bcb37eb-15d9-45b0-90a4-9839a4e03171", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "783934d5-5c03-4aa3-91df-0511ee3fe295", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "783934d5-5c03-4aa3-91df-0511ee3fe295": {"__data__": {"id_": "783934d5-5c03-4aa3-91df-0511ee3fe295", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d0cd9d4f-ac2b-4369-b4fb-2364d6f6443f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f1537c3f-b20d-4906-9223-ba4db80d26ea", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f1537c3f-b20d-4906-9223-ba4db80d26ea": {"__data__": {"id_": "f1537c3f-b20d-4906-9223-ba4db80d26ea", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "783934d5-5c03-4aa3-91df-0511ee3fe295", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2ec11b97-ca4a-4f04-af28-a28a973588ad", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2ec11b97-ca4a-4f04-af28-a28a973588ad": {"__data__": {"id_": "2ec11b97-ca4a-4f04-af28-a28a973588ad", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f1537c3f-b20d-4906-9223-ba4db80d26ea", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "70a3da41-6cec-4681-ba3c-49e2435cf197", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "70a3da41-6cec-4681-ba3c-49e2435cf197": {"__data__": {"id_": "70a3da41-6cec-4681-ba3c-49e2435cf197", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2ec11b97-ca4a-4f04-af28-a28a973588ad", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e65f4f16-a9cb-4834-83a9-c535d8c4a8f4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e65f4f16-a9cb-4834-83a9-c535d8c4a8f4": {"__data__": {"id_": "e65f4f16-a9cb-4834-83a9-c535d8c4a8f4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "70a3da41-6cec-4681-ba3c-49e2435cf197", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dda1c6a3-28f9-4bda-b62f-dff4b810185d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dda1c6a3-28f9-4bda-b62f-dff4b810185d": {"__data__": {"id_": "dda1c6a3-28f9-4bda-b62f-dff4b810185d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e65f4f16-a9cb-4834-83a9-c535d8c4a8f4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d6392f76-98df-4a4f-8c57-87fc17bc43d4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d6392f76-98df-4a4f-8c57-87fc17bc43d4": {"__data__": {"id_": "d6392f76-98df-4a4f-8c57-87fc17bc43d4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dda1c6a3-28f9-4bda-b62f-dff4b810185d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "101ce625-0317-4b48-9c7d-ffa14b7ff2e4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "101ce625-0317-4b48-9c7d-ffa14b7ff2e4": {"__data__": {"id_": "101ce625-0317-4b48-9c7d-ffa14b7ff2e4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d6392f76-98df-4a4f-8c57-87fc17bc43d4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "612e34a5-f745-45ef-a91e-61b5d08ab91f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "612e34a5-f745-45ef-a91e-61b5d08ab91f": {"__data__": {"id_": "612e34a5-f745-45ef-a91e-61b5d08ab91f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "101ce625-0317-4b48-9c7d-ffa14b7ff2e4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d896c04e-1e3b-43d7-9126-c4022852cae6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d896c04e-1e3b-43d7-9126-c4022852cae6": {"__data__": {"id_": "d896c04e-1e3b-43d7-9126-c4022852cae6", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "612e34a5-f745-45ef-a91e-61b5d08ab91f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0d478908-16c9-4bee-a1f6-a0b09a03e809", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0d478908-16c9-4bee-a1f6-a0b09a03e809": {"__data__": {"id_": "0d478908-16c9-4bee-a1f6-a0b09a03e809", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d896c04e-1e3b-43d7-9126-c4022852cae6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "caa8c289-2580-416c-820e-ca38387fb3f3", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "caa8c289-2580-416c-820e-ca38387fb3f3": {"__data__": {"id_": "caa8c289-2580-416c-820e-ca38387fb3f3", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0d478908-16c9-4bee-a1f6-a0b09a03e809", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2f207ee1-2319-43b0-8519-dc7a066d9635", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2f207ee1-2319-43b0-8519-dc7a066d9635": {"__data__": {"id_": "2f207ee1-2319-43b0-8519-dc7a066d9635", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "caa8c289-2580-416c-820e-ca38387fb3f3", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2082ffbc-50a6-4245-869e-13f6cd05b84e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2082ffbc-50a6-4245-869e-13f6cd05b84e": {"__data__": {"id_": "2082ffbc-50a6-4245-869e-13f6cd05b84e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2f207ee1-2319-43b0-8519-dc7a066d9635", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "507c7436-6017-4528-a1d7-2704c59e25f7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "507c7436-6017-4528-a1d7-2704c59e25f7": {"__data__": {"id_": "507c7436-6017-4528-a1d7-2704c59e25f7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2082ffbc-50a6-4245-869e-13f6cd05b84e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "98b3c0f2-3971-411e-b1ba-debf0b43e083", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "98b3c0f2-3971-411e-b1ba-debf0b43e083": {"__data__": {"id_": "98b3c0f2-3971-411e-b1ba-debf0b43e083", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "507c7436-6017-4528-a1d7-2704c59e25f7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "878cc548-b87a-44d0-a6db-215e183ce7c2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "878cc548-b87a-44d0-a6db-215e183ce7c2": {"__data__": {"id_": "878cc548-b87a-44d0-a6db-215e183ce7c2", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "98b3c0f2-3971-411e-b1ba-debf0b43e083", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "18c60443-514d-4459-84d8-360fd8241f31", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "18c60443-514d-4459-84d8-360fd8241f31": {"__data__": {"id_": "18c60443-514d-4459-84d8-360fd8241f31", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "878cc548-b87a-44d0-a6db-215e183ce7c2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cf6fb891-cac2-4509-87d1-65d0fad1ea5b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cf6fb891-cac2-4509-87d1-65d0fad1ea5b": {"__data__": {"id_": "cf6fb891-cac2-4509-87d1-65d0fad1ea5b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "18c60443-514d-4459-84d8-360fd8241f31", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "764726e7-9acd-4594-b590-a42270feea79", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "764726e7-9acd-4594-b590-a42270feea79": {"__data__": {"id_": "764726e7-9acd-4594-b590-a42270feea79", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cf6fb891-cac2-4509-87d1-65d0fad1ea5b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "27c57ac9-1cb3-493b-9c02-f2d0b28855e4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "27c57ac9-1cb3-493b-9c02-f2d0b28855e4": {"__data__": {"id_": "27c57ac9-1cb3-493b-9c02-f2d0b28855e4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "764726e7-9acd-4594-b590-a42270feea79", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7322f358-1948-4f55-9aea-a451de512f2b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7322f358-1948-4f55-9aea-a451de512f2b": {"__data__": {"id_": "7322f358-1948-4f55-9aea-a451de512f2b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "27c57ac9-1cb3-493b-9c02-f2d0b28855e4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1cd51d40-0a9b-4f77-8b36-da9f272044a3", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1cd51d40-0a9b-4f77-8b36-da9f272044a3": {"__data__": {"id_": "1cd51d40-0a9b-4f77-8b36-da9f272044a3", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7322f358-1948-4f55-9aea-a451de512f2b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4458f71f-836e-41d1-b9f2-183e5878227d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4458f71f-836e-41d1-b9f2-183e5878227d": {"__data__": {"id_": "4458f71f-836e-41d1-b9f2-183e5878227d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1cd51d40-0a9b-4f77-8b36-da9f272044a3", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7dc34b2c-b910-4687-9c1a-94cf4247e83f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7dc34b2c-b910-4687-9c1a-94cf4247e83f": {"__data__": {"id_": "7dc34b2c-b910-4687-9c1a-94cf4247e83f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4458f71f-836e-41d1-b9f2-183e5878227d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "94e5821e-8cb8-4bfa-b65c-bc7e6aecf565", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "94e5821e-8cb8-4bfa-b65c-bc7e6aecf565": {"__data__": {"id_": "94e5821e-8cb8-4bfa-b65c-bc7e6aecf565", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7dc34b2c-b910-4687-9c1a-94cf4247e83f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6b2b9f02-004d-49b1-ba1e-1aef1cf72c91", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6b2b9f02-004d-49b1-ba1e-1aef1cf72c91": {"__data__": {"id_": "6b2b9f02-004d-49b1-ba1e-1aef1cf72c91", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "94e5821e-8cb8-4bfa-b65c-bc7e6aecf565", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "aeaa56ff-0b5b-4c6d-937c-cd35745872f1", "node_type": "1", "metadata": {"window": ". . . . . . . . 6\n2.1.1 Code improvements and experiment tracking . ", "original_text": ". "}, "hash": "43268e21e2d03b5c25f4dd7ff41d63c7f3696e08f44c2021cd6e4b9c3226895b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "aeaa56ff-0b5b-4c6d-937c-cd35745872f1": {"__data__": {"id_": "aeaa56ff-0b5b-4c6d-937c-cd35745872f1", "embedding": null, "metadata": {"window": ". . . . . . . . 6\n2.1.1 Code improvements and experiment tracking . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6b2b9f02-004d-49b1-ba1e-1aef1cf72c91", "node_type": "1", "metadata": {"window": ". . . . . . . 
. . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "59bf8388-9103-4ffa-a97d-a353069b2ca9", "node_type": "1", "metadata": {"window": ". . . . . . . 6\n2.1.1 Code improvements and experiment tracking . . ", "original_text": ". "}, "hash": "ef785d8137998c8f510e487d8671be757fff1a8b9c741e7b172f1f5e45e7fa23", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "59bf8388-9103-4ffa-a97d-a353069b2ca9": {"__data__": {"id_": "59bf8388-9103-4ffa-a97d-a353069b2ca9", "embedding": null, "metadata": {"window": ". . . . . . . 6\n2.1.1 Code improvements and experiment tracking . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "aeaa56ff-0b5b-4c6d-937c-cd35745872f1", "node_type": "1", "metadata": {"window": ". . . . . . . . 6\n2.1.1 Code improvements and experiment tracking . ", "original_text": ". "}, "hash": "43268e21e2d03b5c25f4dd7ff41d63c7f3696e08f44c2021cd6e4b9c3226895b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e013a9c5-8dcf-458a-ad54-d8b12410470e", "node_type": "1", "metadata": {"window": ". . . . . . 6\n2.1.1 Code improvements and experiment tracking . . . ", "original_text": ". "}, "hash": "b0edb7880f02c59724b5f6addd8e6f6622755492f57753b461b75ac1ef1dd95a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e013a9c5-8dcf-458a-ad54-d8b12410470e": {"__data__": {"id_": "e013a9c5-8dcf-458a-ad54-d8b12410470e", "embedding": null, "metadata": {"window": ". . . . . . 6\n2.1.1 Code improvements and experiment tracking . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "59bf8388-9103-4ffa-a97d-a353069b2ca9", "node_type": "1", "metadata": {"window": ". . . . . . . 6\n2.1.1 Code improvements and experiment tracking . . ", "original_text": ". "}, "hash": "ef785d8137998c8f510e487d8671be757fff1a8b9c741e7b172f1f5e45e7fa23", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5a46f5e9-219b-45d0-b79c-cce4a88153a1", "node_type": "1", "metadata": {"window": ". . . . . 6\n2.1.1 Code improvements and experiment tracking . . . . ", "original_text": ". "}, "hash": "28a6a3cf3b9bf043a0afcab8b19bd890e7d6b880d35781cea621f08aa470ad40", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5a46f5e9-219b-45d0-b79c-cce4a88153a1": {"__data__": {"id_": "5a46f5e9-219b-45d0-b79c-cce4a88153a1", "embedding": null, "metadata": {"window": ". . . . . 6\n2.1.1 Code improvements and experiment tracking . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e013a9c5-8dcf-458a-ad54-d8b12410470e", "node_type": "1", "metadata": {"window": ". . . . . . 6\n2.1.1 Code improvements and experiment tracking . . . ", "original_text": ". "}, "hash": "b0edb7880f02c59724b5f6addd8e6f6622755492f57753b461b75ac1ef1dd95a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0212d949-c329-4c0e-b0f3-739a42833930", "node_type": "1", "metadata": {"window": ". . . . 6\n2.1.1 Code improvements and experiment tracking . . . . . ", "original_text": "6\n2.1.1 Code improvements and experiment tracking . "}, "hash": "f1c689e1003608d8ed2e7d19585dc8a087d91e758e80a1339f0cab427a24a2c4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0212d949-c329-4c0e-b0f3-739a42833930": {"__data__": {"id_": "0212d949-c329-4c0e-b0f3-739a42833930", "embedding": null, "metadata": {"window": ". . . . 6\n2.1.1 Code improvements and experiment tracking . . . . . ", "original_text": "6\n2.1.1 Code improvements and experiment tracking . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5a46f5e9-219b-45d0-b79c-cce4a88153a1", "node_type": "1", "metadata": {"window": ". . . . . 6\n2.1.1 Code improvements and experiment tracking . . . . ", "original_text": ". "}, "hash": "28a6a3cf3b9bf043a0afcab8b19bd890e7d6b880d35781cea621f08aa470ad40", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fdaa9304-1361-4b1f-91eb-74fe75240fe9", "node_type": "1", "metadata": {"window": ". . . 6\n2.1.1 Code improvements and experiment tracking . . . . . . ", "original_text": ". "}, "hash": "dbf4fd184044c3a4e958bcdf62f2eecb39192445e5f0e1c7124987eeb2f6163b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "6\n2.1.1 Code improvements and experiment tracking . ", "mimetype": "text/plain", "start_char_idx": 436, "end_char_idx": 488, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fdaa9304-1361-4b1f-91eb-74fe75240fe9": {"__data__": {"id_": "fdaa9304-1361-4b1f-91eb-74fe75240fe9", "embedding": null, "metadata": {"window": ". . . 6\n2.1.1 Code improvements and experiment tracking . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0212d949-c329-4c0e-b0f3-739a42833930", "node_type": "1", "metadata": {"window": ". . . . 6\n2.1.1 Code improvements and experiment tracking . . . . . ", "original_text": "6\n2.1.1 Code improvements and experiment tracking . "}, "hash": "f1c689e1003608d8ed2e7d19585dc8a087d91e758e80a1339f0cab427a24a2c4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9f248ad9-d6d8-4c23-80e8-0be85c2512c1", "node_type": "1", "metadata": {"window": ". . 6\n2.1.1 Code improvements and experiment tracking . . . . . . . ", "original_text": ". "}, "hash": "f293f9191016097593cd09ad21bfb39cbd427c46b1a779eeff4d2363f5574f4c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9f248ad9-d6d8-4c23-80e8-0be85c2512c1": {"__data__": {"id_": "9f248ad9-d6d8-4c23-80e8-0be85c2512c1", "embedding": null, "metadata": {"window": ". . 6\n2.1.1 Code improvements and experiment tracking . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fdaa9304-1361-4b1f-91eb-74fe75240fe9", "node_type": "1", "metadata": {"window": ". . . 6\n2.1.1 Code improvements and experiment tracking . . . . . . ", "original_text": ". "}, "hash": "dbf4fd184044c3a4e958bcdf62f2eecb39192445e5f0e1c7124987eeb2f6163b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "46e47b4f-618e-4a8b-bde3-d6e2db0f4434", "node_type": "1", "metadata": {"window": ". 6\n2.1.1 Code improvements and experiment tracking . . . . . . . . ", "original_text": ". "}, "hash": "0a5b1a294726508259852fb4f7c9c7e253f340ad4976c48898cec1cd8e0d0f92", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "46e47b4f-618e-4a8b-bde3-d6e2db0f4434": {"__data__": {"id_": "46e47b4f-618e-4a8b-bde3-d6e2db0f4434", "embedding": null, "metadata": {"window": ". 6\n2.1.1 Code improvements and experiment tracking . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9f248ad9-d6d8-4c23-80e8-0be85c2512c1", "node_type": "1", "metadata": {"window": ". . 6\n2.1.1 Code improvements and experiment tracking . . . . . . . ", "original_text": ". 
"}, "hash": "f293f9191016097593cd09ad21bfb39cbd427c46b1a779eeff4d2363f5574f4c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6904607e-ebd7-4584-b0e4-6901a6ded5c9", "node_type": "1", "metadata": {"window": "6\n2.1.1 Code improvements and experiment tracking . . . . . . . . . ", "original_text": ". "}, "hash": "78f9b10cdf5a27699f17bc59e9eeb7ed5e4dfb41891d838161785cf2e646afed", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6904607e-ebd7-4584-b0e4-6901a6ded5c9": {"__data__": {"id_": "6904607e-ebd7-4584-b0e4-6901a6ded5c9", "embedding": null, "metadata": {"window": "6\n2.1.1 Code improvements and experiment tracking . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "46e47b4f-618e-4a8b-bde3-d6e2db0f4434", "node_type": "1", "metadata": {"window": ". 6\n2.1.1 Code improvements and experiment tracking . . . . . . . . ", "original_text": ". "}, "hash": "0a5b1a294726508259852fb4f7c9c7e253f340ad4976c48898cec1cd8e0d0f92", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "189d3591-5eb0-4e8c-8083-15146d6f39c5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "189d3591-5eb0-4e8c-8083-15146d6f39c5": {"__data__": {"id_": "189d3591-5eb0-4e8c-8083-15146d6f39c5", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6904607e-ebd7-4584-b0e4-6901a6ded5c9", "node_type": "1", "metadata": {"window": "6\n2.1.1 Code improvements and experiment tracking . . . . . . . . . ", "original_text": ". "}, "hash": "78f9b10cdf5a27699f17bc59e9eeb7ed5e4dfb41891d838161785cf2e646afed", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2663141b-9f22-4456-bf58-ce45376a82ee", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2663141b-9f22-4456-bf58-ce45376a82ee": {"__data__": {"id_": "2663141b-9f22-4456-bf58-ce45376a82ee", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "189d3591-5eb0-4e8c-8083-15146d6f39c5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f14aa0ab-e7f4-4c64-86a4-e51bc7cf6c55", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f14aa0ab-e7f4-4c64-86a4-e51bc7cf6c55": {"__data__": {"id_": "f14aa0ab-e7f4-4c64-86a4-e51bc7cf6c55", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2663141b-9f22-4456-bf58-ce45376a82ee", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1d5e2f44-030c-4b16-9486-3821ba0f64f5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1d5e2f44-030c-4b16-9486-3821ba0f64f5": {"__data__": {"id_": "1d5e2f44-030c-4b16-9486-3821ba0f64f5", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f14aa0ab-e7f4-4c64-86a4-e51bc7cf6c55", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "72f7a9fe-d9b8-47df-8860-8821a6a795ce", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "72f7a9fe-d9b8-47df-8860-8821a6a795ce": {"__data__": {"id_": "72f7a9fe-d9b8-47df-8860-8821a6a795ce", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1d5e2f44-030c-4b16-9486-3821ba0f64f5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0d7e9c5b-69b3-40f2-bbac-65da55dc8f51", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0d7e9c5b-69b3-40f2-bbac-65da55dc8f51": {"__data__": {"id_": "0d7e9c5b-69b3-40f2-bbac-65da55dc8f51", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "72f7a9fe-d9b8-47df-8860-8821a6a795ce", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1357de3a-3304-402b-9cc3-c41c92089f29", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1357de3a-3304-402b-9cc3-c41c92089f29": {"__data__": {"id_": "1357de3a-3304-402b-9cc3-c41c92089f29", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0d7e9c5b-69b3-40f2-bbac-65da55dc8f51", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "38583da1-6a7c-459e-b2dd-6a3bb3f7f9b3", "node_type": "1", "metadata": {"window": ". . . . . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . ", "original_text": ". "}, "hash": "fef319934aa8a1756b6e6f0408168148725ca3dd65ed399f9491c7f6b12f8ee2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "38583da1-6a7c-459e-b2dd-6a3bb3f7f9b3": {"__data__": {"id_": "38583da1-6a7c-459e-b2dd-6a3bb3f7f9b3", "embedding": null, "metadata": {"window": ". . . . . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1357de3a-3304-402b-9cc3-c41c92089f29", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a7ae8c96-7b8b-4d67-96b1-3fd93962b469", "node_type": "1", "metadata": {"window": ". . . . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . ", "original_text": ". "}, "hash": "7b56372d4725aa773c707ff5bf5d23be96163a2f4c5a929a53177fdee1495010", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a7ae8c96-7b8b-4d67-96b1-3fd93962b469": {"__data__": {"id_": "a7ae8c96-7b8b-4d67-96b1-3fd93962b469", "embedding": null, "metadata": {"window": ". . . . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "38583da1-6a7c-459e-b2dd-6a3bb3f7f9b3", "node_type": "1", "metadata": {"window": ". . . . . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . ", "original_text": ". 
"}, "hash": "fef319934aa8a1756b6e6f0408168148725ca3dd65ed399f9491c7f6b12f8ee2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b8f64b78-94dd-4340-a0ef-bcc6f67c20e1", "node_type": "1", "metadata": {"window": ". . . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . ", "original_text": ". "}, "hash": "d19a4636a2e81a169e378a46e30899528595028eef33d689966f94f8428ff4f8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b8f64b78-94dd-4340-a0ef-bcc6f67c20e1": {"__data__": {"id_": "b8f64b78-94dd-4340-a0ef-bcc6f67c20e1", "embedding": null, "metadata": {"window": ". . . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a7ae8c96-7b8b-4d67-96b1-3fd93962b469", "node_type": "1", "metadata": {"window": ". . . . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . ", "original_text": ". "}, "hash": "7b56372d4725aa773c707ff5bf5d23be96163a2f4c5a929a53177fdee1495010", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1fed8e1b-2011-4eae-9beb-b25bcb5396a7", "node_type": "1", "metadata": {"window": ". . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . ", "original_text": ". "}, "hash": "298395b4f564f398b1967b8f20a3fba5b6779b6a362e0f3aa8dc4ca43732b385", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1fed8e1b-2011-4eae-9beb-b25bcb5396a7": {"__data__": {"id_": "1fed8e1b-2011-4eae-9beb-b25bcb5396a7", "embedding": null, "metadata": {"window": ". . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b8f64b78-94dd-4340-a0ef-bcc6f67c20e1", "node_type": "1", "metadata": {"window": ". . . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . ", "original_text": ". "}, "hash": "d19a4636a2e81a169e378a46e30899528595028eef33d689966f94f8428ff4f8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "79b39b0b-c58d-4515-b58c-4501287cd5a6", "node_type": "1", "metadata": {"window": ". . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . ", "original_text": "6\n2.1.2 Model evaluation, metrics and threshold analysis . "}, "hash": "2e004dcfbfa47d2b1a1fa973e5d50f01cce4e47773bc3d4efbc0d50f739f40ca", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "79b39b0b-c58d-4515-b58c-4501287cd5a6": {"__data__": {"id_": "79b39b0b-c58d-4515-b58c-4501287cd5a6", "embedding": null, "metadata": {"window": ". . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . ", "original_text": "6\n2.1.2 Model evaluation, metrics and threshold analysis . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1fed8e1b-2011-4eae-9beb-b25bcb5396a7", "node_type": "1", "metadata": {"window": ". . . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . ", "original_text": ". "}, "hash": "298395b4f564f398b1967b8f20a3fba5b6779b6a362e0f3aa8dc4ca43732b385", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dbbb0fe6-8046-43e4-84bb-33efb81d9010", "node_type": "1", "metadata": {"window": ". . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . ", "original_text": ". "}, "hash": "04748b54ed90765b07a33509bbad447c4942010a78bc04c22913512e882a52ee", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "6\n2.1.2 Model evaluation, metrics and threshold analysis . ", "mimetype": "text/plain", "start_char_idx": 518, "end_char_idx": 577, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dbbb0fe6-8046-43e4-84bb-33efb81d9010": {"__data__": {"id_": "dbbb0fe6-8046-43e4-84bb-33efb81d9010", "embedding": null, "metadata": {"window": ". . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "79b39b0b-c58d-4515-b58c-4501287cd5a6", "node_type": "1", "metadata": {"window": ". . . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . ", "original_text": "6\n2.1.2 Model evaluation, metrics and threshold analysis . "}, "hash": "2e004dcfbfa47d2b1a1fa973e5d50f01cce4e47773bc3d4efbc0d50f739f40ca", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a9e6fede-3839-4dcc-a97c-a31b3d738fe5", "node_type": "1", "metadata": {"window": ". . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . . ", "original_text": ". "}, "hash": "30eb64ee5b6022054d3636faa2d655072942d32c843fb11708cda68d44dd6f60", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a9e6fede-3839-4dcc-a97c-a31b3d738fe5": {"__data__": {"id_": "a9e6fede-3839-4dcc-a97c-a31b3d738fe5", "embedding": null, "metadata": {"window": ". . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . . 
", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dbbb0fe6-8046-43e4-84bb-33efb81d9010", "node_type": "1", "metadata": {"window": ". . . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . ", "original_text": ". "}, "hash": "04748b54ed90765b07a33509bbad447c4942010a78bc04c22913512e882a52ee", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6f49a1ce-a01d-42dd-9f32-ee1dcaec4d7f", "node_type": "1", "metadata": {"window": ". 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . . . ", "original_text": ". "}, "hash": "03e8cd00f088f592f277f27c121af3a6d76b3e49ea56b26120e190c729747bad", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6f49a1ce-a01d-42dd-9f32-ee1dcaec4d7f": {"__data__": {"id_": "6f49a1ce-a01d-42dd-9f32-ee1dcaec4d7f", "embedding": null, "metadata": {"window": ". 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a9e6fede-3839-4dcc-a97c-a31b3d738fe5", "node_type": "1", "metadata": {"window": ". . 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . . ", "original_text": ". "}, "hash": "30eb64ee5b6022054d3636faa2d655072942d32c843fb11708cda68d44dd6f60", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3c0dc0df-d398-402f-b60c-ea1b5eef7478", "node_type": "1", "metadata": {"window": "6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . . . . ", "original_text": ". "}, "hash": "e0ad0e73e6df7f046cce124410f4787734658f3ca2205640d51b04596f5eca69", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3c0dc0df-d398-402f-b60c-ea1b5eef7478": {"__data__": {"id_": "3c0dc0df-d398-402f-b60c-ea1b5eef7478", "embedding": null, "metadata": {"window": "6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6f49a1ce-a01d-42dd-9f32-ee1dcaec4d7f", "node_type": "1", "metadata": {"window": ". 6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . . . ", "original_text": ". 
"}, "hash": "03e8cd00f088f592f277f27c121af3a6d76b3e49ea56b26120e190c729747bad", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "babd73f5-1108-443d-9dd2-f9d573cb7337", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "babd73f5-1108-443d-9dd2-f9d573cb7337": {"__data__": {"id_": "babd73f5-1108-443d-9dd2-f9d573cb7337", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3c0dc0df-d398-402f-b60c-ea1b5eef7478", "node_type": "1", "metadata": {"window": "6\n2.1.2 Model evaluation, metrics and threshold analysis . . . . . . . . . ", "original_text": ". "}, "hash": "e0ad0e73e6df7f046cce124410f4787734658f3ca2205640d51b04596f5eca69", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "21752a6d-37e6-4e72-8467-7c3aca49ffbd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "21752a6d-37e6-4e72-8467-7c3aca49ffbd": {"__data__": {"id_": "21752a6d-37e6-4e72-8467-7c3aca49ffbd", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "babd73f5-1108-443d-9dd2-f9d573cb7337", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c6de1561-d737-4afe-addc-f4c1df3896f7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c6de1561-d737-4afe-addc-f4c1df3896f7": {"__data__": {"id_": "c6de1561-d737-4afe-addc-f4c1df3896f7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "21752a6d-37e6-4e72-8467-7c3aca49ffbd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fe69ec8c-fe54-4cd6-8fd4-18a473c401c2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fe69ec8c-fe54-4cd6-8fd4-18a473c401c2": {"__data__": {"id_": "fe69ec8c-fe54-4cd6-8fd4-18a473c401c2", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c6de1561-d737-4afe-addc-f4c1df3896f7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4010c527-89ec-45f4-bc3b-0702fc15e693", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4010c527-89ec-45f4-bc3b-0702fc15e693": {"__data__": {"id_": "4010c527-89ec-45f4-bc3b-0702fc15e693", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fe69ec8c-fe54-4cd6-8fd4-18a473c401c2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0b925135-db51-454a-abe8-4f1338dd016f", "node_type": "1", "metadata": {"window": ". . . . . . . . 7\n2.2 The training data . ", "original_text": ". 
"}, "hash": "50580be0be50c4b2325763259f932b4ae3d03678ca8dde737d0a1191c0324ff0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0b925135-db51-454a-abe8-4f1338dd016f": {"__data__": {"id_": "0b925135-db51-454a-abe8-4f1338dd016f", "embedding": null, "metadata": {"window": ". . . . . . . . 7\n2.2 The training data . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4010c527-89ec-45f4-bc3b-0702fc15e693", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5a16f20f-94d5-46c9-8fba-d2c5653c4625", "node_type": "1", "metadata": {"window": ". . . . . . . 7\n2.2 The training data . . ", "original_text": ". "}, "hash": "9a8e6b075568606c26fd07e30ac1df296ccb7607112acbdc495e48c11c05f590", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5a16f20f-94d5-46c9-8fba-d2c5653c4625": {"__data__": {"id_": "5a16f20f-94d5-46c9-8fba-d2c5653c4625", "embedding": null, "metadata": {"window": ". . . . . . . 7\n2.2 The training data . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0b925135-db51-454a-abe8-4f1338dd016f", "node_type": "1", "metadata": {"window": ". . . . . . . . 7\n2.2 The training data . ", "original_text": ". "}, "hash": "50580be0be50c4b2325763259f932b4ae3d03678ca8dde737d0a1191c0324ff0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "99084c8e-364e-4e73-b47a-be787bb2a107", "node_type": "1", "metadata": {"window": ". . . . . . 7\n2.2 The training data . . . ", "original_text": ". "}, "hash": "8239c1212269a05bb9a7cfef6aedabd999640ba0fee3597ae3fbf311e43d0f0e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "99084c8e-364e-4e73-b47a-be787bb2a107": {"__data__": {"id_": "99084c8e-364e-4e73-b47a-be787bb2a107", "embedding": null, "metadata": {"window": ". . . . . . 7\n2.2 The training data . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5a16f20f-94d5-46c9-8fba-d2c5653c4625", "node_type": "1", "metadata": {"window": ". . . . . . . 7\n2.2 The training data . . ", "original_text": ". "}, "hash": "9a8e6b075568606c26fd07e30ac1df296ccb7607112acbdc495e48c11c05f590", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6de4b45f-3d4e-4981-be81-49cdd631f2f9", "node_type": "1", "metadata": {"window": ". . . . . 7\n2.2 The training data . . . . ", "original_text": ". "}, "hash": "60670cb4e7947dab68c0f91bb5061629e00e0cc7ef859d331a4f5fb81a73d804", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6de4b45f-3d4e-4981-be81-49cdd631f2f9": {"__data__": {"id_": "6de4b45f-3d4e-4981-be81-49cdd631f2f9", "embedding": null, "metadata": {"window": ". . . . . 7\n2.2 The training data . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "99084c8e-364e-4e73-b47a-be787bb2a107", "node_type": "1", "metadata": {"window": ". . . . . . 7\n2.2 The training data . . . ", "original_text": ". "}, "hash": "8239c1212269a05bb9a7cfef6aedabd999640ba0fee3597ae3fbf311e43d0f0e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ffec42bb-fed8-4d55-9de6-de0fdd96aa16", "node_type": "1", "metadata": {"window": ". . . . 7\n2.2 The training data . . . . . ", "original_text": "7\n2.2 The training data . "}, "hash": "b78e3e59fbcbca47a6d89851024a3b7efcc7be14d3b92f0d49665cb171836f88", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ffec42bb-fed8-4d55-9de6-de0fdd96aa16": {"__data__": {"id_": "ffec42bb-fed8-4d55-9de6-de0fdd96aa16", "embedding": null, "metadata": {"window": ". . . . 7\n2.2 The training data . . . . . ", "original_text": "7\n2.2 The training data . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6de4b45f-3d4e-4981-be81-49cdd631f2f9", "node_type": "1", "metadata": {"window": ". . . . . 7\n2.2 The training data . . . . ", "original_text": ". "}, "hash": "60670cb4e7947dab68c0f91bb5061629e00e0cc7ef859d331a4f5fb81a73d804", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "17f35e00-841c-4ea7-8378-3f48fc651993", "node_type": "1", "metadata": {"window": ". . . 
7\n2.2 The training data . . . . . . ", "original_text": ". "}, "hash": "2ad6f8f602c681c115c9e565d30c555e90c38a3319038b9290767cf067c99441", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "7\n2.2 The training data . ", "mimetype": "text/plain", "start_char_idx": 603, "end_char_idx": 629, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "17f35e00-841c-4ea7-8378-3f48fc651993": {"__data__": {"id_": "17f35e00-841c-4ea7-8378-3f48fc651993", "embedding": null, "metadata": {"window": ". . . 7\n2.2 The training data . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ffec42bb-fed8-4d55-9de6-de0fdd96aa16", "node_type": "1", "metadata": {"window": ". . . . 7\n2.2 The training data . . . . . ", "original_text": "7\n2.2 The training data . "}, "hash": "b78e3e59fbcbca47a6d89851024a3b7efcc7be14d3b92f0d49665cb171836f88", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4d24ae53-f918-4097-a23f-94c9be43125e", "node_type": "1", "metadata": {"window": ". . 7\n2.2 The training data . . . . . . . ", "original_text": ". "}, "hash": "a674ba89f9a75147ba7c6cd3b08f98ad8e955f66c81bd5dc89cda968b3549da3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4d24ae53-f918-4097-a23f-94c9be43125e": {"__data__": {"id_": "4d24ae53-f918-4097-a23f-94c9be43125e", "embedding": null, "metadata": {"window": ". . 7\n2.2 The training data . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "17f35e00-841c-4ea7-8378-3f48fc651993", "node_type": "1", "metadata": {"window": ". . . 7\n2.2 The training data . . . . . . ", "original_text": ". "}, "hash": "2ad6f8f602c681c115c9e565d30c555e90c38a3319038b9290767cf067c99441", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e4072659-f32c-4460-9a09-8439bf6bdd70", "node_type": "1", "metadata": {"window": ". 7\n2.2 The training data . . . . . . . . ", "original_text": ". "}, "hash": "40b977a86f152abd436181309d54c4cecdbc2b04cc04d96c813f73385c084f7f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e4072659-f32c-4460-9a09-8439bf6bdd70": {"__data__": {"id_": "e4072659-f32c-4460-9a09-8439bf6bdd70", "embedding": null, "metadata": {"window": ". 7\n2.2 The training data . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4d24ae53-f918-4097-a23f-94c9be43125e", "node_type": "1", "metadata": {"window": ". . 7\n2.2 The training data . . . . . . . ", "original_text": ". "}, "hash": "a674ba89f9a75147ba7c6cd3b08f98ad8e955f66c81bd5dc89cda968b3549da3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "efd429eb-de49-44f3-8264-fd7b30873af8", "node_type": "1", "metadata": {"window": "7\n2.2 The training data . . . . . . . . . ", "original_text": ". "}, "hash": "8b28231a20632bfa3b8d3fd6e3a5013dc7d8d5a67cb28f5bedc7fa1e070cd280", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "efd429eb-de49-44f3-8264-fd7b30873af8": {"__data__": {"id_": "efd429eb-de49-44f3-8264-fd7b30873af8", "embedding": null, "metadata": {"window": "7\n2.2 The training data . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e4072659-f32c-4460-9a09-8439bf6bdd70", "node_type": "1", "metadata": {"window": ". 7\n2.2 The training data . . . . . . . . ", "original_text": ". "}, "hash": "40b977a86f152abd436181309d54c4cecdbc2b04cc04d96c813f73385c084f7f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c69be104-16fd-4e86-8e3b-e9f02e15be3f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c69be104-16fd-4e86-8e3b-e9f02e15be3f": {"__data__": {"id_": "c69be104-16fd-4e86-8e3b-e9f02e15be3f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "efd429eb-de49-44f3-8264-fd7b30873af8", "node_type": "1", "metadata": {"window": "7\n2.2 The training data . . . . . . . . . ", "original_text": ". "}, "hash": "8b28231a20632bfa3b8d3fd6e3a5013dc7d8d5a67cb28f5bedc7fa1e070cd280", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2ca64856-cdc6-4999-9ecb-efaa18d55ec6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2ca64856-cdc6-4999-9ecb-efaa18d55ec6": {"__data__": {"id_": "2ca64856-cdc6-4999-9ecb-efaa18d55ec6", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c69be104-16fd-4e86-8e3b-e9f02e15be3f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fe5fcb31-e33f-4354-8bdc-aa94ac19d8c5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fe5fcb31-e33f-4354-8bdc-aa94ac19d8c5": {"__data__": {"id_": "fe5fcb31-e33f-4354-8bdc-aa94ac19d8c5", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2ca64856-cdc6-4999-9ecb-efaa18d55ec6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ef907f66-da87-4b13-82c9-ab181afecbec", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ef907f66-da87-4b13-82c9-ab181afecbec": {"__data__": {"id_": "ef907f66-da87-4b13-82c9-ab181afecbec", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fe5fcb31-e33f-4354-8bdc-aa94ac19d8c5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9b0d6415-7740-493b-9a44-cd5a3aaf7d13", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9b0d6415-7740-493b-9a44-cd5a3aaf7d13": {"__data__": {"id_": "9b0d6415-7740-493b-9a44-cd5a3aaf7d13", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ef907f66-da87-4b13-82c9-ab181afecbec", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "740217d9-52ed-4e40-a368-2dd50cc01d8d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "740217d9-52ed-4e40-a368-2dd50cc01d8d": {"__data__": {"id_": "740217d9-52ed-4e40-a368-2dd50cc01d8d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9b0d6415-7740-493b-9a44-cd5a3aaf7d13", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ca795ca9-59ba-4997-912b-09ff52304745", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ca795ca9-59ba-4997-912b-09ff52304745": {"__data__": {"id_": "ca795ca9-59ba-4997-912b-09ff52304745", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "740217d9-52ed-4e40-a368-2dd50cc01d8d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b7135b08-5e3e-4701-9d20-ce9468e826c7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b7135b08-5e3e-4701-9d20-ce9468e826c7": {"__data__": {"id_": "b7135b08-5e3e-4701-9d20-ce9468e826c7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ca795ca9-59ba-4997-912b-09ff52304745", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a159840a-c64e-4402-a930-2a5554fdb450", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a159840a-c64e-4402-a930-2a5554fdb450": {"__data__": {"id_": "a159840a-c64e-4402-a930-2a5554fdb450", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b7135b08-5e3e-4701-9d20-ce9468e826c7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d4a07581-a9e4-4ad1-8e89-a38aac6f079a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d4a07581-a9e4-4ad1-8e89-a38aac6f079a": {"__data__": {"id_": "d4a07581-a9e4-4ad1-8e89-a38aac6f079a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a159840a-c64e-4402-a930-2a5554fdb450", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "75ccfd70-2249-4b3e-94d7-6839cd83c778", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "75ccfd70-2249-4b3e-94d7-6839cd83c778": {"__data__": {"id_": "75ccfd70-2249-4b3e-94d7-6839cd83c778", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d4a07581-a9e4-4ad1-8e89-a38aac6f079a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "08d2b254-bdde-4efd-8e7a-06f18cba1baa", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "08d2b254-bdde-4efd-8e7a-06f18cba1baa": {"__data__": {"id_": "08d2b254-bdde-4efd-8e7a-06f18cba1baa", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "75ccfd70-2249-4b3e-94d7-6839cd83c778", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6c086bd0-2d3d-495e-bffa-33a55cdf36d9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6c086bd0-2d3d-495e-bffa-33a55cdf36d9": {"__data__": {"id_": "6c086bd0-2d3d-495e-bffa-33a55cdf36d9", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "08d2b254-bdde-4efd-8e7a-06f18cba1baa", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6acc0c95-d875-4613-8610-ef4015b413fd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6acc0c95-d875-4613-8610-ef4015b413fd": {"__data__": {"id_": "6acc0c95-d875-4613-8610-ef4015b413fd", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6c086bd0-2d3d-495e-bffa-33a55cdf36d9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c45b8827-2c7b-4c4e-b08c-be963bcb4723", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c45b8827-2c7b-4c4e-b08c-be963bcb4723": {"__data__": {"id_": "c45b8827-2c7b-4c4e-b08c-be963bcb4723", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6acc0c95-d875-4613-8610-ef4015b413fd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "de581595-f4e7-493a-9c05-a8464b0e794c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "de581595-f4e7-493a-9c05-a8464b0e794c": {"__data__": {"id_": "de581595-f4e7-493a-9c05-a8464b0e794c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c45b8827-2c7b-4c4e-b08c-be963bcb4723", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bcbb8b0b-c9b5-4f44-a081-4efe37dad3aa", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bcbb8b0b-c9b5-4f44-a081-4efe37dad3aa": {"__data__": {"id_": "bcbb8b0b-c9b5-4f44-a081-4efe37dad3aa", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "de581595-f4e7-493a-9c05-a8464b0e794c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "aa9fcfd9-a11a-4dd5-89de-815d3b83b0d5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "aa9fcfd9-a11a-4dd5-89de-815d3b83b0d5": {"__data__": {"id_": "aa9fcfd9-a11a-4dd5-89de-815d3b83b0d5", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bcbb8b0b-c9b5-4f44-a081-4efe37dad3aa", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d5a23980-dd6b-4bc4-9de9-bdb0a64d3018", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d5a23980-dd6b-4bc4-9de9-bdb0a64d3018": {"__data__": {"id_": "d5a23980-dd6b-4bc4-9de9-bdb0a64d3018", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "aa9fcfd9-a11a-4dd5-89de-815d3b83b0d5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cdb23195-ff6a-4fa8-a7a5-c6a07d352dac", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cdb23195-ff6a-4fa8-a7a5-c6a07d352dac": {"__data__": {"id_": "cdb23195-ff6a-4fa8-a7a5-c6a07d352dac", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d5a23980-dd6b-4bc4-9de9-bdb0a64d3018", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bdfda74e-24bb-4e95-a143-6a8e3467a638", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bdfda74e-24bb-4e95-a143-6a8e3467a638": {"__data__": {"id_": "bdfda74e-24bb-4e95-a143-6a8e3467a638", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cdb23195-ff6a-4fa8-a7a5-c6a07d352dac", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "aa58176d-ea29-406d-9bcf-5d34aa5dab89", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "aa58176d-ea29-406d-9bcf-5d34aa5dab89": {"__data__": {"id_": "aa58176d-ea29-406d-9bcf-5d34aa5dab89", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bdfda74e-24bb-4e95-a143-6a8e3467a638", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0acb8cb1-6e16-4f99-880f-cf6dbe5bc867", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0acb8cb1-6e16-4f99-880f-cf6dbe5bc867": {"__data__": {"id_": "0acb8cb1-6e16-4f99-880f-cf6dbe5bc867", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "aa58176d-ea29-406d-9bcf-5d34aa5dab89", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "834ed8e7-f596-4bbc-8560-54ecceb13371", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "834ed8e7-f596-4bbc-8560-54ecceb13371": {"__data__": {"id_": "834ed8e7-f596-4bbc-8560-54ecceb13371", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0acb8cb1-6e16-4f99-880f-cf6dbe5bc867", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "093c16ee-170f-49d2-be11-dd5e16e84aeb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "093c16ee-170f-49d2-be11-dd5e16e84aeb": {"__data__": {"id_": "093c16ee-170f-49d2-be11-dd5e16e84aeb", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "834ed8e7-f596-4bbc-8560-54ecceb13371", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "95e37b08-b3b6-4bb9-be1a-4c9351b77a3f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "95e37b08-b3b6-4bb9-be1a-4c9351b77a3f": {"__data__": {"id_": "95e37b08-b3b6-4bb9-be1a-4c9351b77a3f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "093c16ee-170f-49d2-be11-dd5e16e84aeb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a789d0e5-367f-4420-b723-f24130d3bf5c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a789d0e5-367f-4420-b723-f24130d3bf5c": {"__data__": {"id_": "a789d0e5-367f-4420-b723-f24130d3bf5c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "95e37b08-b3b6-4bb9-be1a-4c9351b77a3f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c4b036bb-f145-4977-87f1-ecb1c17a2d37", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c4b036bb-f145-4977-87f1-ecb1c17a2d37": {"__data__": {"id_": "c4b036bb-f145-4977-87f1-ecb1c17a2d37", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a789d0e5-367f-4420-b723-f24130d3bf5c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1d5275b2-71a0-46cb-94e6-97dee041d00e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1d5275b2-71a0-46cb-94e6-97dee041d00e": {"__data__": {"id_": "1d5275b2-71a0-46cb-94e6-97dee041d00e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c4b036bb-f145-4977-87f1-ecb1c17a2d37", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6c1f3995-67f2-451c-8839-a55791bebc8f", "node_type": "1", "metadata": {"window": ". . . . . . . . 9\n2.2.1 The need for new training datasets . ", "original_text": ". "}, "hash": "6fd193b2d01323b91aac1c40b7630373797606e7ca6008e61c010fa961917cfd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6c1f3995-67f2-451c-8839-a55791bebc8f": {"__data__": {"id_": "6c1f3995-67f2-451c-8839-a55791bebc8f", "embedding": null, "metadata": {"window": ". . . . . . . . 9\n2.2.1 The need for new training datasets . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1d5275b2-71a0-46cb-94e6-97dee041d00e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "65faa0e3-e146-4ce5-88b5-cf22f879afe9", "node_type": "1", "metadata": {"window": ". . . . . . . 9\n2.2.1 The need for new training datasets . . ", "original_text": ". 
"}, "hash": "bce2e645e03d965432c766e6c7261a642b55c0534ab071e05e2a7b8951f4e204", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "65faa0e3-e146-4ce5-88b5-cf22f879afe9": {"__data__": {"id_": "65faa0e3-e146-4ce5-88b5-cf22f879afe9", "embedding": null, "metadata": {"window": ". . . . . . . 9\n2.2.1 The need for new training datasets . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6c1f3995-67f2-451c-8839-a55791bebc8f", "node_type": "1", "metadata": {"window": ". . . . . . . . 9\n2.2.1 The need for new training datasets . ", "original_text": ". "}, "hash": "6fd193b2d01323b91aac1c40b7630373797606e7ca6008e61c010fa961917cfd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b0412f5e-93aa-4f67-95a8-d482d39cbfd6", "node_type": "1", "metadata": {"window": ". . . . . . 9\n2.2.1 The need for new training datasets . . . ", "original_text": ". "}, "hash": "3fd163c04a4864d780be9604459ed258357b19a5b647d13a7044655404035f97", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b0412f5e-93aa-4f67-95a8-d482d39cbfd6": {"__data__": {"id_": "b0412f5e-93aa-4f67-95a8-d482d39cbfd6", "embedding": null, "metadata": {"window": ". . . . . . 9\n2.2.1 The need for new training datasets . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "65faa0e3-e146-4ce5-88b5-cf22f879afe9", "node_type": "1", "metadata": {"window": ". . . . . . . 9\n2.2.1 The need for new training datasets . . ", "original_text": ". "}, "hash": "bce2e645e03d965432c766e6c7261a642b55c0534ab071e05e2a7b8951f4e204", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9cc1d81c-4140-4313-9787-81ce77bf8ac1", "node_type": "1", "metadata": {"window": ". . . . . 9\n2.2.1 The need for new training datasets . . . . ", "original_text": ". "}, "hash": "4d434794f0ce58e5da699372d01c5f9a4850aceaab084c2211873bc37baa7fc8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9cc1d81c-4140-4313-9787-81ce77bf8ac1": {"__data__": {"id_": "9cc1d81c-4140-4313-9787-81ce77bf8ac1", "embedding": null, "metadata": {"window": ". . . . . 9\n2.2.1 The need for new training datasets . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b0412f5e-93aa-4f67-95a8-d482d39cbfd6", "node_type": "1", "metadata": {"window": ". . . . . . 9\n2.2.1 The need for new training datasets . . . ", "original_text": ". "}, "hash": "3fd163c04a4864d780be9604459ed258357b19a5b647d13a7044655404035f97", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "36363011-1120-43db-b3e3-fc0d1810b739", "node_type": "1", "metadata": {"window": ". . . . 9\n2.2.1 The need for new training datasets . . . . . ", "original_text": "9\n2.2.1 The need for new training datasets . "}, "hash": "15e56e8105b354bb18590d6dabca5508f0e36f5bc5d1f7878d6e97204003ac6d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "36363011-1120-43db-b3e3-fc0d1810b739": {"__data__": {"id_": "36363011-1120-43db-b3e3-fc0d1810b739", "embedding": null, "metadata": {"window": ". . . . 9\n2.2.1 The need for new training datasets . . . . . ", "original_text": "9\n2.2.1 The need for new training datasets . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9cc1d81c-4140-4313-9787-81ce77bf8ac1", "node_type": "1", "metadata": {"window": ". . . . . 9\n2.2.1 The need for new training datasets . . . . ", "original_text": ". "}, "hash": "4d434794f0ce58e5da699372d01c5f9a4850aceaab084c2211873bc37baa7fc8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6d196081-f067-4f9f-83a7-7fd7e918cc47", "node_type": "1", "metadata": {"window": ". . . 9\n2.2.1 The need for new training datasets . . . . . . ", "original_text": ". "}, "hash": "ba536574f882ed3425d4f8a61c5d4a6457d9071becf1d78b3e6282fcc7751123", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "9\n2.2.1 The need for new training datasets . ", "mimetype": "text/plain", "start_char_idx": 703, "end_char_idx": 748, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6d196081-f067-4f9f-83a7-7fd7e918cc47": {"__data__": {"id_": "6d196081-f067-4f9f-83a7-7fd7e918cc47", "embedding": null, "metadata": {"window": ". . . 9\n2.2.1 The need for new training datasets . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "36363011-1120-43db-b3e3-fc0d1810b739", "node_type": "1", "metadata": {"window": ". . . . 9\n2.2.1 The need for new training datasets . . . . . 
", "original_text": "9\n2.2.1 The need for new training datasets . "}, "hash": "15e56e8105b354bb18590d6dabca5508f0e36f5bc5d1f7878d6e97204003ac6d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a550f6e2-6e70-4320-acfb-d4878568553c", "node_type": "1", "metadata": {"window": ". . 9\n2.2.1 The need for new training datasets . . . . . . . ", "original_text": ". "}, "hash": "36a7ebdc3ab80b34221799a403f2e92fd7f2c3cb0546e3ef0e891a8e868f6d48", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a550f6e2-6e70-4320-acfb-d4878568553c": {"__data__": {"id_": "a550f6e2-6e70-4320-acfb-d4878568553c", "embedding": null, "metadata": {"window": ". . 9\n2.2.1 The need for new training datasets . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6d196081-f067-4f9f-83a7-7fd7e918cc47", "node_type": "1", "metadata": {"window": ". . . 9\n2.2.1 The need for new training datasets . . . . . . ", "original_text": ". "}, "hash": "ba536574f882ed3425d4f8a61c5d4a6457d9071becf1d78b3e6282fcc7751123", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9f0519f2-5e8d-4c0b-a0df-eef1ed9929ce", "node_type": "1", "metadata": {"window": ". 9\n2.2.1 The need for new training datasets . . . . . . . . ", "original_text": ". "}, "hash": "c4ff8a7f154426b8a19ee2de35e66d24e31a0a208408cf435e167396b3c234bc", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9f0519f2-5e8d-4c0b-a0df-eef1ed9929ce": {"__data__": {"id_": "9f0519f2-5e8d-4c0b-a0df-eef1ed9929ce", "embedding": null, "metadata": {"window": ". 9\n2.2.1 The need for new training datasets . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a550f6e2-6e70-4320-acfb-d4878568553c", "node_type": "1", "metadata": {"window": ". . 9\n2.2.1 The need for new training datasets . . . . . . . ", "original_text": ". "}, "hash": "36a7ebdc3ab80b34221799a403f2e92fd7f2c3cb0546e3ef0e891a8e868f6d48", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "47c57a76-4bc3-4dad-b25d-ed7523b85304", "node_type": "1", "metadata": {"window": "9\n2.2.1 The need for new training datasets . . . . . . . . . ", "original_text": ". "}, "hash": "e789ba3971609748315d90ca2042b4c999755e507c7e5f66036cc7a210fa8858", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "47c57a76-4bc3-4dad-b25d-ed7523b85304": {"__data__": {"id_": "47c57a76-4bc3-4dad-b25d-ed7523b85304", "embedding": null, "metadata": {"window": "9\n2.2.1 The need for new training datasets . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9f0519f2-5e8d-4c0b-a0df-eef1ed9929ce", "node_type": "1", "metadata": {"window": ". 9\n2.2.1 The need for new training datasets . . . . . . . . ", "original_text": ". "}, "hash": "c4ff8a7f154426b8a19ee2de35e66d24e31a0a208408cf435e167396b3c234bc", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e7357c55-81dc-4084-8c5d-8093fa6868b7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e7357c55-81dc-4084-8c5d-8093fa6868b7": {"__data__": {"id_": "e7357c55-81dc-4084-8c5d-8093fa6868b7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "47c57a76-4bc3-4dad-b25d-ed7523b85304", "node_type": "1", "metadata": {"window": "9\n2.2.1 The need for new training datasets . . . . . . . . . ", "original_text": ". "}, "hash": "e789ba3971609748315d90ca2042b4c999755e507c7e5f66036cc7a210fa8858", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6919eb0f-63de-4090-b6b8-129017b2ce6c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6919eb0f-63de-4090-b6b8-129017b2ce6c": {"__data__": {"id_": "6919eb0f-63de-4090-b6b8-129017b2ce6c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e7357c55-81dc-4084-8c5d-8093fa6868b7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e08d802d-4c3e-4599-9d52-f193741f319d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e08d802d-4c3e-4599-9d52-f193741f319d": {"__data__": {"id_": "e08d802d-4c3e-4599-9d52-f193741f319d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6919eb0f-63de-4090-b6b8-129017b2ce6c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bde27828-e6be-4770-ae2b-58763be28ddb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bde27828-e6be-4770-ae2b-58763be28ddb": {"__data__": {"id_": "bde27828-e6be-4770-ae2b-58763be28ddb", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e08d802d-4c3e-4599-9d52-f193741f319d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ae6fbb47-6cb4-4d15-9cc0-51d5fa8a3cdc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ae6fbb47-6cb4-4d15-9cc0-51d5fa8a3cdc": {"__data__": {"id_": "ae6fbb47-6cb4-4d15-9cc0-51d5fa8a3cdc", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bde27828-e6be-4770-ae2b-58763be28ddb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "014641be-e7ab-42a3-92f2-0f1bd7038785", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "014641be-e7ab-42a3-92f2-0f1bd7038785": {"__data__": {"id_": "014641be-e7ab-42a3-92f2-0f1bd7038785", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ae6fbb47-6cb4-4d15-9cc0-51d5fa8a3cdc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ffce041c-2665-4d39-9521-d0544e331bb4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ffce041c-2665-4d39-9521-d0544e331bb4": {"__data__": {"id_": "ffce041c-2665-4d39-9521-d0544e331bb4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "014641be-e7ab-42a3-92f2-0f1bd7038785", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5473394e-23a3-469a-b334-862600ccc619", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5473394e-23a3-469a-b334-862600ccc619": {"__data__": {"id_": "5473394e-23a3-469a-b334-862600ccc619", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ffce041c-2665-4d39-9521-d0544e331bb4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "25c0446d-81ca-456b-80f1-4bce41d1b828", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "25c0446d-81ca-456b-80f1-4bce41d1b828": {"__data__": {"id_": "25c0446d-81ca-456b-80f1-4bce41d1b828", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5473394e-23a3-469a-b334-862600ccc619", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f7d3365d-df08-4936-8522-e28c0ea54b5e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f7d3365d-df08-4936-8522-e28c0ea54b5e": {"__data__": {"id_": "f7d3365d-df08-4936-8522-e28c0ea54b5e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "25c0446d-81ca-456b-80f1-4bce41d1b828", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d4caf388-9558-42d4-be52-07473b5c5666", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d4caf388-9558-42d4-be52-07473b5c5666": {"__data__": {"id_": "d4caf388-9558-42d4-be52-07473b5c5666", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f7d3365d-df08-4936-8522-e28c0ea54b5e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "352da421-fb3a-4ae8-ab77-4b13552a425a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "352da421-fb3a-4ae8-ab77-4b13552a425a": {"__data__": {"id_": "352da421-fb3a-4ae8-ab77-4b13552a425a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d4caf388-9558-42d4-be52-07473b5c5666", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "84314055-0f03-4138-ac94-b0eeaa8d2413", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "84314055-0f03-4138-ac94-b0eeaa8d2413": {"__data__": {"id_": "84314055-0f03-4138-ac94-b0eeaa8d2413", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "352da421-fb3a-4ae8-ab77-4b13552a425a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c20952de-7755-4ca1-b614-3f248af3bb85", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c20952de-7755-4ca1-b614-3f248af3bb85": {"__data__": {"id_": "c20952de-7755-4ca1-b614-3f248af3bb85", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "84314055-0f03-4138-ac94-b0eeaa8d2413", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "39e50d86-4c32-4a52-86ed-4ed50a98693f", "node_type": "1", "metadata": {"window": ". . . . . . . . 9\n2.2.2 Garbage in, garbage out . ", "original_text": ". "}, "hash": "88bac462f9117548da25ef38178ab0a0d06340c3b57eb1f1d3d9fcd34dcf85e6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "39e50d86-4c32-4a52-86ed-4ed50a98693f": {"__data__": {"id_": "39e50d86-4c32-4a52-86ed-4ed50a98693f", "embedding": null, "metadata": {"window": ". . . . . . . . 9\n2.2.2 Garbage in, garbage out . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c20952de-7755-4ca1-b614-3f248af3bb85", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cff100f8-481a-4770-b9be-06c363f9e15d", "node_type": "1", "metadata": {"window": ". . . . . . . 9\n2.2.2 Garbage in, garbage out . . ", "original_text": ". "}, "hash": "bc3e9509b265e23e3ddf78792600fdc707dc7b271b07125df7c2c79f2b2155a2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cff100f8-481a-4770-b9be-06c363f9e15d": {"__data__": {"id_": "cff100f8-481a-4770-b9be-06c363f9e15d", "embedding": null, "metadata": {"window": ". . . . . . . 9\n2.2.2 Garbage in, garbage out . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "39e50d86-4c32-4a52-86ed-4ed50a98693f", "node_type": "1", "metadata": {"window": ". . . . . . . . 9\n2.2.2 Garbage in, garbage out . ", "original_text": ". "}, "hash": "88bac462f9117548da25ef38178ab0a0d06340c3b57eb1f1d3d9fcd34dcf85e6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "085a6f53-a455-4d21-ab9a-eec4eaf4a413", "node_type": "1", "metadata": {"window": ". . . . . . 9\n2.2.2 Garbage in, garbage out . . . ", "original_text": ". "}, "hash": "957fb38c477a0a0b8e6f38bc3bf38b1654aa7d71d26eca315b16d939a6b60afa", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "085a6f53-a455-4d21-ab9a-eec4eaf4a413": {"__data__": {"id_": "085a6f53-a455-4d21-ab9a-eec4eaf4a413", "embedding": null, "metadata": {"window": ". . . . . . 9\n2.2.2 Garbage in, garbage out . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cff100f8-481a-4770-b9be-06c363f9e15d", "node_type": "1", "metadata": {"window": ". . . . . . . 9\n2.2.2 Garbage in, garbage out . . ", "original_text": ". "}, "hash": "bc3e9509b265e23e3ddf78792600fdc707dc7b271b07125df7c2c79f2b2155a2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "254381f1-7218-48d4-b950-c02677074873", "node_type": "1", "metadata": {"window": ". . . . . 9\n2.2.2 Garbage in, garbage out . . . . ", "original_text": ". "}, "hash": "4d5264f3b60cf3bde8282ccf142de33ee5acaca2fc8d6d0321d17e81a5d9508c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "254381f1-7218-48d4-b950-c02677074873": {"__data__": {"id_": "254381f1-7218-48d4-b950-c02677074873", "embedding": null, "metadata": {"window": ". . . . . 9\n2.2.2 Garbage in, garbage out . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "085a6f53-a455-4d21-ab9a-eec4eaf4a413", "node_type": "1", "metadata": {"window": ". . . . . . 9\n2.2.2 Garbage in, garbage out . . . ", "original_text": ". "}, "hash": "957fb38c477a0a0b8e6f38bc3bf38b1654aa7d71d26eca315b16d939a6b60afa", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f277422e-3e8f-4cfd-854e-23d65bb7ac1f", "node_type": "1", "metadata": {"window": ". . . . 9\n2.2.2 Garbage in, garbage out . . . . . ", "original_text": "9\n2.2.2 Garbage in, garbage out . "}, "hash": "c2b4ad2f5122d7cbd201b553a2a600bca9d2fdc497d3700975be7882257ef4a4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f277422e-3e8f-4cfd-854e-23d65bb7ac1f": {"__data__": {"id_": "f277422e-3e8f-4cfd-854e-23d65bb7ac1f", "embedding": null, "metadata": {"window": ". . . . 9\n2.2.2 Garbage in, garbage out . . . . . ", "original_text": "9\n2.2.2 Garbage in, garbage out . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "254381f1-7218-48d4-b950-c02677074873", "node_type": "1", "metadata": {"window": ". . . . . 9\n2.2.2 Garbage in, garbage out . . . . ", "original_text": ". "}, "hash": "4d5264f3b60cf3bde8282ccf142de33ee5acaca2fc8d6d0321d17e81a5d9508c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "700efc3f-8618-4459-8280-c2f12a0baeb3", "node_type": "1", "metadata": {"window": ". . . 9\n2.2.2 Garbage in, garbage out . . . . . . ", "original_text": ". "}, "hash": "b9819d8fb34c17333ba52147b332a07eb01437742975fca062270c4d9a15d650", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "9\n2.2.2 Garbage in, garbage out . ", "mimetype": "text/plain", "start_char_idx": 792, "end_char_idx": 826, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "700efc3f-8618-4459-8280-c2f12a0baeb3": {"__data__": {"id_": "700efc3f-8618-4459-8280-c2f12a0baeb3", "embedding": null, "metadata": {"window": ". . . 9\n2.2.2 Garbage in, garbage out . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f277422e-3e8f-4cfd-854e-23d65bb7ac1f", "node_type": "1", "metadata": {"window": ". . . . 9\n2.2.2 Garbage in, garbage out . . . . . ", "original_text": "9\n2.2.2 Garbage in, garbage out . "}, "hash": "c2b4ad2f5122d7cbd201b553a2a600bca9d2fdc497d3700975be7882257ef4a4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "54beb635-5444-411e-a512-2fac95901cdd", "node_type": "1", "metadata": {"window": ". . 9\n2.2.2 Garbage in, garbage out . . . . . . . ", "original_text": ". "}, "hash": "54664c9c4eba3a45565afb4423e35c955925bb8057ba8ffa454d5ddac9b18312", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "54beb635-5444-411e-a512-2fac95901cdd": {"__data__": {"id_": "54beb635-5444-411e-a512-2fac95901cdd", "embedding": null, "metadata": {"window": ". . 9\n2.2.2 Garbage in, garbage out . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "700efc3f-8618-4459-8280-c2f12a0baeb3", "node_type": "1", "metadata": {"window": ". . . 9\n2.2.2 Garbage in, garbage out . . . . . . ", "original_text": ". "}, "hash": "b9819d8fb34c17333ba52147b332a07eb01437742975fca062270c4d9a15d650", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "61ce1819-7253-4549-856e-eba686e56983", "node_type": "1", "metadata": {"window": ". 9\n2.2.2 Garbage in, garbage out . . . . . . . . ", "original_text": ". "}, "hash": "ce1331beac761fafc4d3fab301805eb35c11fe0778aa6101983faca4311c97fa", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "61ce1819-7253-4549-856e-eba686e56983": {"__data__": {"id_": "61ce1819-7253-4549-856e-eba686e56983", "embedding": null, "metadata": {"window": ". 9\n2.2.2 Garbage in, garbage out . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "54beb635-5444-411e-a512-2fac95901cdd", "node_type": "1", "metadata": {"window": ". . 9\n2.2.2 Garbage in, garbage out . . . . . . . ", "original_text": ". 
"}, "hash": "54664c9c4eba3a45565afb4423e35c955925bb8057ba8ffa454d5ddac9b18312", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c5d5e7fa-16be-4165-a8fc-d0d72b36a255", "node_type": "1", "metadata": {"window": "9\n2.2.2 Garbage in, garbage out . . . . . . . . . ", "original_text": ". "}, "hash": "41eef5ba78abe189a7e6f5e32c2629cc439a74691b43383761a9bd2a6ecc3b04", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c5d5e7fa-16be-4165-a8fc-d0d72b36a255": {"__data__": {"id_": "c5d5e7fa-16be-4165-a8fc-d0d72b36a255", "embedding": null, "metadata": {"window": "9\n2.2.2 Garbage in, garbage out . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "61ce1819-7253-4549-856e-eba686e56983", "node_type": "1", "metadata": {"window": ". 9\n2.2.2 Garbage in, garbage out . . . . . . . . ", "original_text": ". "}, "hash": "ce1331beac761fafc4d3fab301805eb35c11fe0778aa6101983faca4311c97fa", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "40583b0d-9815-4e55-8c22-e088caf3ae16", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "40583b0d-9815-4e55-8c22-e088caf3ae16": {"__data__": {"id_": "40583b0d-9815-4e55-8c22-e088caf3ae16", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c5d5e7fa-16be-4165-a8fc-d0d72b36a255", "node_type": "1", "metadata": {"window": "9\n2.2.2 Garbage in, garbage out . . . . . . . . . ", "original_text": ". "}, "hash": "41eef5ba78abe189a7e6f5e32c2629cc439a74691b43383761a9bd2a6ecc3b04", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "86445103-3997-42ca-bbc1-1cb2f6df273e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "86445103-3997-42ca-bbc1-1cb2f6df273e": {"__data__": {"id_": "86445103-3997-42ca-bbc1-1cb2f6df273e", "embedding": null, "metadata": {"window": ". . . . . . . . . 
", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "40583b0d-9815-4e55-8c22-e088caf3ae16", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e5d9e8f6-896f-4ac6-b283-957d8d25f1a7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e5d9e8f6-896f-4ac6-b283-957d8d25f1a7": {"__data__": {"id_": "e5d9e8f6-896f-4ac6-b283-957d8d25f1a7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "86445103-3997-42ca-bbc1-1cb2f6df273e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "612dc8be-abd2-4f34-bba9-f37c461f9ebb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "612dc8be-abd2-4f34-bba9-f37c461f9ebb": {"__data__": {"id_": "612dc8be-abd2-4f34-bba9-f37c461f9ebb", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e5d9e8f6-896f-4ac6-b283-957d8d25f1a7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d3b81710-74f6-40e8-b4c7-b85558a4cf5c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d3b81710-74f6-40e8-b4c7-b85558a4cf5c": {"__data__": {"id_": "d3b81710-74f6-40e8-b4c7-b85558a4cf5c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "612dc8be-abd2-4f34-bba9-f37c461f9ebb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "45dbe881-b7d9-432c-8eeb-97c457995be4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "45dbe881-b7d9-432c-8eeb-97c457995be4": {"__data__": {"id_": "45dbe881-b7d9-432c-8eeb-97c457995be4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d3b81710-74f6-40e8-b4c7-b85558a4cf5c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "95569fe4-6b1a-4b8f-8645-6a1518ebf147", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "95569fe4-6b1a-4b8f-8645-6a1518ebf147": {"__data__": {"id_": "95569fe4-6b1a-4b8f-8645-6a1518ebf147", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "45dbe881-b7d9-432c-8eeb-97c457995be4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "81da4f34-e765-41ea-aff5-9bf225435cfe", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "81da4f34-e765-41ea-aff5-9bf225435cfe": {"__data__": {"id_": "81da4f34-e765-41ea-aff5-9bf225435cfe", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "95569fe4-6b1a-4b8f-8645-6a1518ebf147", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "455116ac-4f68-4141-89b5-2eb940fccc32", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "455116ac-4f68-4141-89b5-2eb940fccc32": {"__data__": {"id_": "455116ac-4f68-4141-89b5-2eb940fccc32", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "81da4f34-e765-41ea-aff5-9bf225435cfe", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "41f650f4-924c-4cf9-981e-77834d0df780", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "41f650f4-924c-4cf9-981e-77834d0df780": {"__data__": {"id_": "41f650f4-924c-4cf9-981e-77834d0df780", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "455116ac-4f68-4141-89b5-2eb940fccc32", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9546716f-ed71-4f4c-9a1e-235940d6c249", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9546716f-ed71-4f4c-9a1e-235940d6c249": {"__data__": {"id_": "9546716f-ed71-4f4c-9a1e-235940d6c249", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "41f650f4-924c-4cf9-981e-77834d0df780", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "57f7a476-d8f8-468f-97f7-c9f6cce97cb9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "57f7a476-d8f8-468f-97f7-c9f6cce97cb9": {"__data__": {"id_": "57f7a476-d8f8-468f-97f7-c9f6cce97cb9", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9546716f-ed71-4f4c-9a1e-235940d6c249", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0dae5e39-ab6e-4e64-b754-288903aa7a4c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0dae5e39-ab6e-4e64-b754-288903aa7a4c": {"__data__": {"id_": "0dae5e39-ab6e-4e64-b754-288903aa7a4c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "57f7a476-d8f8-468f-97f7-c9f6cce97cb9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b2a13496-06f2-4a06-b9fa-ed9391a16b12", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b2a13496-06f2-4a06-b9fa-ed9391a16b12": {"__data__": {"id_": "b2a13496-06f2-4a06-b9fa-ed9391a16b12", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0dae5e39-ab6e-4e64-b754-288903aa7a4c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "66ac48b3-0183-4cf8-a9af-0864cb6b82f0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "66ac48b3-0183-4cf8-a9af-0864cb6b82f0": {"__data__": {"id_": "66ac48b3-0183-4cf8-a9af-0864cb6b82f0", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b2a13496-06f2-4a06-b9fa-ed9391a16b12", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a52eb297-77a0-4ef1-b3fb-f93ebac7e60d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a52eb297-77a0-4ef1-b3fb-f93ebac7e60d": {"__data__": {"id_": "a52eb297-77a0-4ef1-b3fb-f93ebac7e60d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "66ac48b3-0183-4cf8-a9af-0864cb6b82f0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7fdc2a59-9008-44f7-991c-3f8b53b26a96", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7fdc2a59-9008-44f7-991c-3f8b53b26a96": {"__data__": {"id_": "7fdc2a59-9008-44f7-991c-3f8b53b26a96", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a52eb297-77a0-4ef1-b3fb-f93ebac7e60d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "364d956b-896a-45c7-8286-bdbfafcd59d2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "364d956b-896a-45c7-8286-bdbfafcd59d2": {"__data__": {"id_": "364d956b-896a-45c7-8286-bdbfafcd59d2", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7fdc2a59-9008-44f7-991c-3f8b53b26a96", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c3ca74d7-b4c5-4a14-9040-c12dd722777e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c3ca74d7-b4c5-4a14-9040-c12dd722777e": {"__data__": {"id_": "c3ca74d7-b4c5-4a14-9040-c12dd722777e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "364d956b-896a-45c7-8286-bdbfafcd59d2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a7f369c1-654c-4aac-b1f7-80cabff1be14", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a7f369c1-654c-4aac-b1f7-80cabff1be14": {"__data__": {"id_": "a7f369c1-654c-4aac-b1f7-80cabff1be14", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c3ca74d7-b4c5-4a14-9040-c12dd722777e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "690662df-c4ee-4adb-82a3-ec607bf3a4e4", "node_type": "1", "metadata": {"window": ". . . . . . . . 10\n2.2.3 Training data synthesis . ", "original_text": ". "}, "hash": "eafcde22cbf0b2e6552e8bbded61e87bd84c66c0b12823b64cecadb8b9f40713", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "690662df-c4ee-4adb-82a3-ec607bf3a4e4": {"__data__": {"id_": "690662df-c4ee-4adb-82a3-ec607bf3a4e4", "embedding": null, "metadata": {"window": ". . . . . . . . 10\n2.2.3 Training data synthesis . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a7f369c1-654c-4aac-b1f7-80cabff1be14", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "08f4e80d-38a6-4eb3-92a5-f3e1f31a7bdf", "node_type": "1", "metadata": {"window": ". . . . . . . 10\n2.2.3 Training data synthesis . . ", "original_text": ". "}, "hash": "6d7c53c886eafec343f727d915eb4513146e80bec49f27f58f9919ba8fde74bf", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "08f4e80d-38a6-4eb3-92a5-f3e1f31a7bdf": {"__data__": {"id_": "08f4e80d-38a6-4eb3-92a5-f3e1f31a7bdf", "embedding": null, "metadata": {"window": ". . . . . . . 10\n2.2.3 Training data synthesis . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "690662df-c4ee-4adb-82a3-ec607bf3a4e4", "node_type": "1", "metadata": {"window": ". . . . . . . . 10\n2.2.3 Training data synthesis . ", "original_text": ". "}, "hash": "eafcde22cbf0b2e6552e8bbded61e87bd84c66c0b12823b64cecadb8b9f40713", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "182c9022-0a66-4a54-a0b1-896409b156f1", "node_type": "1", "metadata": {"window": ". . . . . . 10\n2.2.3 Training data synthesis . . . ", "original_text": ". "}, "hash": "5526f70abce0ab510c32ec0a0718163dd086bc01fc463e32d94371504e6847f0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "182c9022-0a66-4a54-a0b1-896409b156f1": {"__data__": {"id_": "182c9022-0a66-4a54-a0b1-896409b156f1", "embedding": null, "metadata": {"window": ". . . . . . 10\n2.2.3 Training data synthesis . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "08f4e80d-38a6-4eb3-92a5-f3e1f31a7bdf", "node_type": "1", "metadata": {"window": ". . . . . . . 10\n2.2.3 Training data synthesis . . ", "original_text": ". "}, "hash": "6d7c53c886eafec343f727d915eb4513146e80bec49f27f58f9919ba8fde74bf", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e3e51562-d148-440a-b717-70541ab2a372", "node_type": "1", "metadata": {"window": ". . . . . 10\n2.2.3 Training data synthesis . . . . ", "original_text": ". "}, "hash": "ac2efb2a863903af7110505df58116f213136c5eedb8d2529a10ba3cf46f6735", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e3e51562-d148-440a-b717-70541ab2a372": {"__data__": {"id_": "e3e51562-d148-440a-b717-70541ab2a372", "embedding": null, "metadata": {"window": ". . . . . 10\n2.2.3 Training data synthesis . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "182c9022-0a66-4a54-a0b1-896409b156f1", "node_type": "1", "metadata": {"window": ". . . . . . 10\n2.2.3 Training data synthesis . . . ", "original_text": ". "}, "hash": "5526f70abce0ab510c32ec0a0718163dd086bc01fc463e32d94371504e6847f0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d063a6f3-7844-4591-80af-72014570c8ff", "node_type": "1", "metadata": {"window": ". . . . 10\n2.2.3 Training data synthesis . . . . . ", "original_text": "10\n2.2.3 Training data synthesis . "}, "hash": "c8df9e2011406e407db788a55fcc6cda3abd0edc938d5a7da0087492507c21bb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d063a6f3-7844-4591-80af-72014570c8ff": {"__data__": {"id_": "d063a6f3-7844-4591-80af-72014570c8ff", "embedding": null, "metadata": {"window": ". . . . 10\n2.2.3 Training data synthesis . . . . . ", "original_text": "10\n2.2.3 Training data synthesis . 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e3e51562-d148-440a-b717-70541ab2a372", "node_type": "1", "metadata": {"window": ". . . . . 10\n2.2.3 Training data synthesis . . . . ", "original_text": ". "}, "hash": "ac2efb2a863903af7110505df58116f213136c5eedb8d2529a10ba3cf46f6735", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7fb0839c-bd58-41ba-862b-a88239c304b8", "node_type": "1", "metadata": {"window": ". . . 10\n2.2.3 Training data synthesis . . . . . . ", "original_text": ". "}, "hash": "74f6b87e3c26ecc362ff6c8b5fde56d84ff716b24816044a70c5e6a5cba883ca", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "10\n2.2.3 Training data synthesis . ", "mimetype": "text/plain", "start_char_idx": 882, "end_char_idx": 917, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7fb0839c-bd58-41ba-862b-a88239c304b8": {"__data__": {"id_": "7fb0839c-bd58-41ba-862b-a88239c304b8", "embedding": null, "metadata": {"window": ". . . 10\n2.2.3 Training data synthesis . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d063a6f3-7844-4591-80af-72014570c8ff", "node_type": "1", "metadata": {"window": ". . . . 10\n2.2.3 Training data synthesis . . . . . ", "original_text": "10\n2.2.3 Training data synthesis . "}, "hash": "c8df9e2011406e407db788a55fcc6cda3abd0edc938d5a7da0087492507c21bb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e7405003-dec5-48d0-923c-b11dddcb8c17", "node_type": "1", "metadata": {"window": ". . 10\n2.2.3 Training data synthesis . . . . . . . ", "original_text": ". "}, "hash": "d1fcd6d1c72e9a225e50fbf2ecba3db7a1986b4918e1f50209552d7c243d76a8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e7405003-dec5-48d0-923c-b11dddcb8c17": {"__data__": {"id_": "e7405003-dec5-48d0-923c-b11dddcb8c17", "embedding": null, "metadata": {"window": ". . 10\n2.2.3 Training data synthesis . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7fb0839c-bd58-41ba-862b-a88239c304b8", "node_type": "1", "metadata": {"window": ". . . 10\n2.2.3 Training data synthesis . . . . . . ", "original_text": ". 
"}, "hash": "74f6b87e3c26ecc362ff6c8b5fde56d84ff716b24816044a70c5e6a5cba883ca", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "62e4511f-14dd-4909-8f85-ce7e46749849", "node_type": "1", "metadata": {"window": ". 10\n2.2.3 Training data synthesis . . . . . . . . ", "original_text": ". "}, "hash": "d1a8b270d1b201aa16b57f856ce37d65e5dd3b95a4c3feba35e7d30df2f1daba", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "62e4511f-14dd-4909-8f85-ce7e46749849": {"__data__": {"id_": "62e4511f-14dd-4909-8f85-ce7e46749849", "embedding": null, "metadata": {"window": ". 10\n2.2.3 Training data synthesis . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e7405003-dec5-48d0-923c-b11dddcb8c17", "node_type": "1", "metadata": {"window": ". . 10\n2.2.3 Training data synthesis . . . . . . . ", "original_text": ". "}, "hash": "d1fcd6d1c72e9a225e50fbf2ecba3db7a1986b4918e1f50209552d7c243d76a8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "aec8b991-d807-4438-801a-4a1eab70ceb4", "node_type": "1", "metadata": {"window": "10\n2.2.3 Training data synthesis . . . . . . . . . ", "original_text": ". "}, "hash": "601b2dba29fa3ceccb02f7c706e7c9ce8e243f01df9c58e1599d52e0aff12179", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "aec8b991-d807-4438-801a-4a1eab70ceb4": {"__data__": {"id_": "aec8b991-d807-4438-801a-4a1eab70ceb4", "embedding": null, "metadata": {"window": "10\n2.2.3 Training data synthesis . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "62e4511f-14dd-4909-8f85-ce7e46749849", "node_type": "1", "metadata": {"window": ". 10\n2.2.3 Training data synthesis . . . . . . . . ", "original_text": ". "}, "hash": "d1a8b270d1b201aa16b57f856ce37d65e5dd3b95a4c3feba35e7d30df2f1daba", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "faa6f58d-1705-4523-95d6-f9cd2d181d5a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "faa6f58d-1705-4523-95d6-f9cd2d181d5a": {"__data__": {"id_": "faa6f58d-1705-4523-95d6-f9cd2d181d5a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "aec8b991-d807-4438-801a-4a1eab70ceb4", "node_type": "1", "metadata": {"window": "10\n2.2.3 Training data synthesis . . . . . . . . . ", "original_text": ". "}, "hash": "601b2dba29fa3ceccb02f7c706e7c9ce8e243f01df9c58e1599d52e0aff12179", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "186ae934-b2fd-46ec-9f8c-bf30af97f134", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "186ae934-b2fd-46ec-9f8c-bf30af97f134": {"__data__": {"id_": "186ae934-b2fd-46ec-9f8c-bf30af97f134", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "faa6f58d-1705-4523-95d6-f9cd2d181d5a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "096db7f6-33fa-4ee9-8ffa-90eb58bb3293", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "096db7f6-33fa-4ee9-8ffa-90eb58bb3293": {"__data__": {"id_": "096db7f6-33fa-4ee9-8ffa-90eb58bb3293", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "186ae934-b2fd-46ec-9f8c-bf30af97f134", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6176aafa-9309-49f9-93df-634e6f34ab8f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6176aafa-9309-49f9-93df-634e6f34ab8f": {"__data__": {"id_": "6176aafa-9309-49f9-93df-634e6f34ab8f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "096db7f6-33fa-4ee9-8ffa-90eb58bb3293", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "07285404-98ce-435b-9069-053dea5eecb0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "07285404-98ce-435b-9069-053dea5eecb0": {"__data__": {"id_": "07285404-98ce-435b-9069-053dea5eecb0", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6176aafa-9309-49f9-93df-634e6f34ab8f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "700b820d-7a5e-48e1-808a-0ab0aac3e1e6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "700b820d-7a5e-48e1-808a-0ab0aac3e1e6": {"__data__": {"id_": "700b820d-7a5e-48e1-808a-0ab0aac3e1e6", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "07285404-98ce-435b-9069-053dea5eecb0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cf7ca23f-6838-4ff1-abab-dd443b84da37", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cf7ca23f-6838-4ff1-abab-dd443b84da37": {"__data__": {"id_": "cf7ca23f-6838-4ff1-abab-dd443b84da37", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "700b820d-7a5e-48e1-808a-0ab0aac3e1e6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "51cc3f3b-2a9d-4fbc-abd7-8ed44ff32c77", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "51cc3f3b-2a9d-4fbc-abd7-8ed44ff32c77": {"__data__": {"id_": "51cc3f3b-2a9d-4fbc-abd7-8ed44ff32c77", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cf7ca23f-6838-4ff1-abab-dd443b84da37", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "03c5c698-303d-4a5d-ab8e-323dcd3829d2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "03c5c698-303d-4a5d-ab8e-323dcd3829d2": {"__data__": {"id_": "03c5c698-303d-4a5d-ab8e-323dcd3829d2", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "51cc3f3b-2a9d-4fbc-abd7-8ed44ff32c77", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f88f8fa4-f946-4e76-a3dc-fe22584eb647", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f88f8fa4-f946-4e76-a3dc-fe22584eb647": {"__data__": {"id_": "f88f8fa4-f946-4e76-a3dc-fe22584eb647", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "03c5c698-303d-4a5d-ab8e-323dcd3829d2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a3f13bc9-118d-4a26-b51b-26386fde6fa6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a3f13bc9-118d-4a26-b51b-26386fde6fa6": {"__data__": {"id_": "a3f13bc9-118d-4a26-b51b-26386fde6fa6", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f88f8fa4-f946-4e76-a3dc-fe22584eb647", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6abf387b-a0d9-4a24-91b0-5036cc14c805", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6abf387b-a0d9-4a24-91b0-5036cc14c805": {"__data__": {"id_": "6abf387b-a0d9-4a24-91b0-5036cc14c805", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a3f13bc9-118d-4a26-b51b-26386fde6fa6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6495d3fc-101d-4694-905b-5575e3fbb2b7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6495d3fc-101d-4694-905b-5575e3fbb2b7": {"__data__": {"id_": "6495d3fc-101d-4694-905b-5575e3fbb2b7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6abf387b-a0d9-4a24-91b0-5036cc14c805", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c4fb9d84-e7a6-4466-b211-3280cc9cc0df", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c4fb9d84-e7a6-4466-b211-3280cc9cc0df": {"__data__": {"id_": "c4fb9d84-e7a6-4466-b211-3280cc9cc0df", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6495d3fc-101d-4694-905b-5575e3fbb2b7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "42895f62-3e8d-4395-9264-1f3786934b7a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "42895f62-3e8d-4395-9264-1f3786934b7a": {"__data__": {"id_": "42895f62-3e8d-4395-9264-1f3786934b7a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c4fb9d84-e7a6-4466-b211-3280cc9cc0df", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "23b9910e-3887-440a-b3ba-9c05b5f1d71b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "23b9910e-3887-440a-b3ba-9c05b5f1d71b": {"__data__": {"id_": "23b9910e-3887-440a-b3ba-9c05b5f1d71b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "42895f62-3e8d-4395-9264-1f3786934b7a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1062cc1e-28ed-4b37-8465-960e9c7316d7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1062cc1e-28ed-4b37-8465-960e9c7316d7": {"__data__": {"id_": "1062cc1e-28ed-4b37-8465-960e9c7316d7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "23b9910e-3887-440a-b3ba-9c05b5f1d71b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "af96e777-8e6c-43a6-8a44-9c9fa7e3fb4c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "af96e777-8e6c-43a6-8a44-9c9fa7e3fb4c": {"__data__": {"id_": "af96e777-8e6c-43a6-8a44-9c9fa7e3fb4c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1062cc1e-28ed-4b37-8465-960e9c7316d7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3b787eaf-97de-457c-a1f3-166f640a7416", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3b787eaf-97de-457c-a1f3-166f640a7416": {"__data__": {"id_": "3b787eaf-97de-457c-a1f3-166f640a7416", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "af96e777-8e6c-43a6-8a44-9c9fa7e3fb4c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "432ca6c3-1af0-4b51-bc4a-79781cce183a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "432ca6c3-1af0-4b51-bc4a-79781cce183a": {"__data__": {"id_": "432ca6c3-1af0-4b51-bc4a-79781cce183a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3b787eaf-97de-457c-a1f3-166f640a7416", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3488e2e5-1638-4be5-b066-5b50629fd121", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3488e2e5-1638-4be5-b066-5b50629fd121": {"__data__": {"id_": "3488e2e5-1638-4be5-b066-5b50629fd121", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "432ca6c3-1af0-4b51-bc4a-79781cce183a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "df97ac8c-1bd5-48af-82d9-0b2acea45d95", "node_type": "1", "metadata": {"window": ". . . . . . . . 11\n2.3 Model architecture and training recipe . ", "original_text": ". "}, "hash": "66853bac961236c6ad32e31a119f89d5ba0ac38ebf283c294be13178e8195ce4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "df97ac8c-1bd5-48af-82d9-0b2acea45d95": {"__data__": {"id_": "df97ac8c-1bd5-48af-82d9-0b2acea45d95", "embedding": null, "metadata": {"window": ". . . . . . . . 11\n2.3 Model architecture and training recipe . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3488e2e5-1638-4be5-b066-5b50629fd121", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e213a7d8-d437-440e-ad01-60172f77d1ff", "node_type": "1", "metadata": {"window": ". . . . . . . 11\n2.3 Model architecture and training recipe . . ", "original_text": ". "}, "hash": "8d441cfe16d341bc09df075a2d006841eee43a85408ce3ae3b1338653c49f2fd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e213a7d8-d437-440e-ad01-60172f77d1ff": {"__data__": {"id_": "e213a7d8-d437-440e-ad01-60172f77d1ff", "embedding": null, "metadata": {"window": ". . . . . . . 11\n2.3 Model architecture and training recipe . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "df97ac8c-1bd5-48af-82d9-0b2acea45d95", "node_type": "1", "metadata": {"window": ". . . . . . . . 11\n2.3 Model architecture and training recipe . ", "original_text": ". "}, "hash": "66853bac961236c6ad32e31a119f89d5ba0ac38ebf283c294be13178e8195ce4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d89b7b29-9b71-4a15-a004-6e5ab6242af3", "node_type": "1", "metadata": {"window": ". . . . . . 11\n2.3 Model architecture and training recipe . . . ", "original_text": ". "}, "hash": "3349bbdc8e7f15a75f003eeab445127ccded08363931fe81f339ac28e3292932", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d89b7b29-9b71-4a15-a004-6e5ab6242af3": {"__data__": {"id_": "d89b7b29-9b71-4a15-a004-6e5ab6242af3", "embedding": null, "metadata": {"window": ". . . . . . 11\n2.3 Model architecture and training recipe . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e213a7d8-d437-440e-ad01-60172f77d1ff", "node_type": "1", "metadata": {"window": ". . . . . . . 11\n2.3 Model architecture and training recipe . . ", "original_text": ". 
"}, "hash": "8d441cfe16d341bc09df075a2d006841eee43a85408ce3ae3b1338653c49f2fd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "42616817-c368-418b-b4c3-f67435396f9b", "node_type": "1", "metadata": {"window": ". . . . . 11\n2.3 Model architecture and training recipe . . . . ", "original_text": ". "}, "hash": "afda123cb10b02426a0114d35d8ad7413206eeccda573884ec1300f88177e6af", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "42616817-c368-418b-b4c3-f67435396f9b": {"__data__": {"id_": "42616817-c368-418b-b4c3-f67435396f9b", "embedding": null, "metadata": {"window": ". . . . . 11\n2.3 Model architecture and training recipe . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d89b7b29-9b71-4a15-a004-6e5ab6242af3", "node_type": "1", "metadata": {"window": ". . . . . . 11\n2.3 Model architecture and training recipe . . . ", "original_text": ". "}, "hash": "3349bbdc8e7f15a75f003eeab445127ccded08363931fe81f339ac28e3292932", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cec812b8-07bd-474c-a35c-e2a17a85970d", "node_type": "1", "metadata": {"window": ". . . . 11\n2.3 Model architecture and training recipe . . . . . ", "original_text": "11\n2.3 Model architecture and training recipe . "}, "hash": "3687c7d305423115b01bd80a509365142150f3def0744d0cf284180e874826a2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cec812b8-07bd-474c-a35c-e2a17a85970d": {"__data__": {"id_": "cec812b8-07bd-474c-a35c-e2a17a85970d", "embedding": null, "metadata": {"window": ". . . . 11\n2.3 Model architecture and training recipe . . . . . ", "original_text": "11\n2.3 Model architecture and training recipe . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "42616817-c368-418b-b4c3-f67435396f9b", "node_type": "1", "metadata": {"window": ". . . . . 11\n2.3 Model architecture and training recipe . . . . ", "original_text": ". "}, "hash": "afda123cb10b02426a0114d35d8ad7413206eeccda573884ec1300f88177e6af", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4588df1e-9e69-490c-b75e-c136b3d73229", "node_type": "1", "metadata": {"window": ". . . 11\n2.3 Model architecture and training recipe . . . . . . ", "original_text": ". "}, "hash": "8fb8eac9b8b59cc7c6a28e6e40157602fec6743f93256ddd903a92f8f484b3da", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "11\n2.3 Model architecture and training recipe . 
", "mimetype": "text/plain", "start_char_idx": 975, "end_char_idx": 1023, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4588df1e-9e69-490c-b75e-c136b3d73229": {"__data__": {"id_": "4588df1e-9e69-490c-b75e-c136b3d73229", "embedding": null, "metadata": {"window": ". . . 11\n2.3 Model architecture and training recipe . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cec812b8-07bd-474c-a35c-e2a17a85970d", "node_type": "1", "metadata": {"window": ". . . . 11\n2.3 Model architecture and training recipe . . . . . ", "original_text": "11\n2.3 Model architecture and training recipe . "}, "hash": "3687c7d305423115b01bd80a509365142150f3def0744d0cf284180e874826a2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "219cd3af-167d-405e-adf2-86ec7dace7ac", "node_type": "1", "metadata": {"window": ". . 11\n2.3 Model architecture and training recipe . . . . . . . ", "original_text": ". "}, "hash": "13af3b242db8a50aeb02d8498508cf97182bc39462a93b3655b56851e7aeb66e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "219cd3af-167d-405e-adf2-86ec7dace7ac": {"__data__": {"id_": "219cd3af-167d-405e-adf2-86ec7dace7ac", "embedding": null, "metadata": {"window": ". . 11\n2.3 Model architecture and training recipe . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4588df1e-9e69-490c-b75e-c136b3d73229", "node_type": "1", "metadata": {"window": ". . . 11\n2.3 Model architecture and training recipe . . . . . . ", "original_text": ". "}, "hash": "8fb8eac9b8b59cc7c6a28e6e40157602fec6743f93256ddd903a92f8f484b3da", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "35e9a18d-e69a-4e00-a2a5-658b1afa2e07", "node_type": "1", "metadata": {"window": ". 11\n2.3 Model architecture and training recipe . . . . . . . . ", "original_text": ". "}, "hash": "83a1d764200d7621bd74915435eb7b4b370113d8713c451cab391891a08c49d1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "35e9a18d-e69a-4e00-a2a5-658b1afa2e07": {"__data__": {"id_": "35e9a18d-e69a-4e00-a2a5-658b1afa2e07", "embedding": null, "metadata": {"window": ". 11\n2.3 Model architecture and training recipe . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "219cd3af-167d-405e-adf2-86ec7dace7ac", "node_type": "1", "metadata": {"window": ". . 11\n2.3 Model architecture and training recipe . . . . . . . ", "original_text": ". "}, "hash": "13af3b242db8a50aeb02d8498508cf97182bc39462a93b3655b56851e7aeb66e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cc6683c9-d7c2-43ff-a762-e345d7ab280f", "node_type": "1", "metadata": {"window": "11\n2.3 Model architecture and training recipe . . . . . . . . . ", "original_text": ". "}, "hash": "4358c106d6cb63308ff5ada0da3ec316d30dc4577083d1f79893f8409cd88e01", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cc6683c9-d7c2-43ff-a762-e345d7ab280f": {"__data__": {"id_": "cc6683c9-d7c2-43ff-a762-e345d7ab280f", "embedding": null, "metadata": {"window": "11\n2.3 Model architecture and training recipe . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "35e9a18d-e69a-4e00-a2a5-658b1afa2e07", "node_type": "1", "metadata": {"window": ". 11\n2.3 Model architecture and training recipe . . . . . . . . ", "original_text": ". "}, "hash": "83a1d764200d7621bd74915435eb7b4b370113d8713c451cab391891a08c49d1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "14f92f7b-213b-4925-a0c1-750522b2d0cf", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "14f92f7b-213b-4925-a0c1-750522b2d0cf": {"__data__": {"id_": "14f92f7b-213b-4925-a0c1-750522b2d0cf", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cc6683c9-d7c2-43ff-a762-e345d7ab280f", "node_type": "1", "metadata": {"window": "11\n2.3 Model architecture and training recipe . . . . . . . . . ", "original_text": ". "}, "hash": "4358c106d6cb63308ff5ada0da3ec316d30dc4577083d1f79893f8409cd88e01", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ca9658ae-d6d5-4b79-8baa-445d8c194af2", "node_type": "1", "metadata": {"window": ". . . 
. . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ca9658ae-d6d5-4b79-8baa-445d8c194af2": {"__data__": {"id_": "ca9658ae-d6d5-4b79-8baa-445d8c194af2", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "14f92f7b-213b-4925-a0c1-750522b2d0cf", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "97afdf3a-8f6b-4bba-9def-2e12251908bf", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "97afdf3a-8f6b-4bba-9def-2e12251908bf": {"__data__": {"id_": "97afdf3a-8f6b-4bba-9def-2e12251908bf", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ca9658ae-d6d5-4b79-8baa-445d8c194af2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "989d3fd2-3a32-4df3-863a-aff0d9e6d5c4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "989d3fd2-3a32-4df3-863a-aff0d9e6d5c4": {"__data__": {"id_": "989d3fd2-3a32-4df3-863a-aff0d9e6d5c4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "97afdf3a-8f6b-4bba-9def-2e12251908bf", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "69af4999-2384-4e5b-a2ed-f8a50723499e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "69af4999-2384-4e5b-a2ed-f8a50723499e": {"__data__": {"id_": "69af4999-2384-4e5b-a2ed-f8a50723499e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "989d3fd2-3a32-4df3-863a-aff0d9e6d5c4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "59d365ed-06ab-4c1a-96d6-887862ad2c62", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "59d365ed-06ab-4c1a-96d6-887862ad2c62": {"__data__": {"id_": "59d365ed-06ab-4c1a-96d6-887862ad2c62", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "69af4999-2384-4e5b-a2ed-f8a50723499e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3aff52a9-0551-48c7-a643-57cbd40168b0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3aff52a9-0551-48c7-a643-57cbd40168b0": {"__data__": {"id_": "3aff52a9-0551-48c7-a643-57cbd40168b0", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "59d365ed-06ab-4c1a-96d6-887862ad2c62", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "93ac8471-95ef-49c9-8d81-d35fac819a2b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "93ac8471-95ef-49c9-8d81-d35fac819a2b": {"__data__": {"id_": "93ac8471-95ef-49c9-8d81-d35fac819a2b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3aff52a9-0551-48c7-a643-57cbd40168b0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b8e0698a-9c55-475b-af11-efd14e30a941", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b8e0698a-9c55-475b-af11-efd14e30a941": {"__data__": {"id_": "b8e0698a-9c55-475b-af11-efd14e30a941", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "93ac8471-95ef-49c9-8d81-d35fac819a2b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "880d5c0d-a4a2-45ff-b26a-e9d19d12a055", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "880d5c0d-a4a2-45ff-b26a-e9d19d12a055": {"__data__": {"id_": "880d5c0d-a4a2-45ff-b26a-e9d19d12a055", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b8e0698a-9c55-475b-af11-efd14e30a941", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "794d6a0c-089a-4a9f-8170-29c20b3ec300", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "794d6a0c-089a-4a9f-8170-29c20b3ec300": {"__data__": {"id_": "794d6a0c-089a-4a9f-8170-29c20b3ec300", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "880d5c0d-a4a2-45ff-b26a-e9d19d12a055", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ecfdef26-b97e-4ad0-bcbc-f8ee5d4a4970", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ecfdef26-b97e-4ad0-bcbc-f8ee5d4a4970": {"__data__": {"id_": "ecfdef26-b97e-4ad0-bcbc-f8ee5d4a4970", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "794d6a0c-089a-4a9f-8170-29c20b3ec300", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "811e812b-53e4-48c5-85f5-ad9c130b1f5b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "811e812b-53e4-48c5-85f5-ad9c130b1f5b": {"__data__": {"id_": "811e812b-53e4-48c5-85f5-ad9c130b1f5b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ecfdef26-b97e-4ad0-bcbc-f8ee5d4a4970", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "757b2a07-cb08-43e4-b855-915e793cbc7d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "757b2a07-cb08-43e4-b855-915e793cbc7d": {"__data__": {"id_": "757b2a07-cb08-43e4-b855-915e793cbc7d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "811e812b-53e4-48c5-85f5-ad9c130b1f5b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1f81c1a7-c32d-49b4-be38-48559bae2c10", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1f81c1a7-c32d-49b4-be38-48559bae2c10": {"__data__": {"id_": "1f81c1a7-c32d-49b4-be38-48559bae2c10", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "757b2a07-cb08-43e4-b855-915e793cbc7d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ab2cdb79-4e87-4bb0-84b2-3f6af4bf1959", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ab2cdb79-4e87-4bb0-84b2-3f6af4bf1959": {"__data__": {"id_": "ab2cdb79-4e87-4bb0-84b2-3f6af4bf1959", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1f81c1a7-c32d-49b4-be38-48559bae2c10", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3b6630a7-b7cf-4ac8-b0a0-aefe1c4838e1", "node_type": "1", "metadata": {"window": ". . . . . . . . 13\n2.4 Results . ", "original_text": ". "}, "hash": "ec2be32330a7fa69a76635d58c2a806742a6bc82bbb60828586960027dbfa6b6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3b6630a7-b7cf-4ac8-b0a0-aefe1c4838e1": {"__data__": {"id_": "3b6630a7-b7cf-4ac8-b0a0-aefe1c4838e1", "embedding": null, "metadata": {"window": ". . . . . . . . 13\n2.4 Results . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ab2cdb79-4e87-4bb0-84b2-3f6af4bf1959", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fe632d2e-75ac-4dcf-b9ce-c2112e0d96b1", "node_type": "1", "metadata": {"window": ". . . . . . . 13\n2.4 Results . . ", "original_text": ". "}, "hash": "19937be2f0a5784209586333ffe52ffe7e36b4431e9d2cecd98e4113773e4a07", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fe632d2e-75ac-4dcf-b9ce-c2112e0d96b1": {"__data__": {"id_": "fe632d2e-75ac-4dcf-b9ce-c2112e0d96b1", "embedding": null, "metadata": {"window": ". . . . . . . 13\n2.4 Results . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3b6630a7-b7cf-4ac8-b0a0-aefe1c4838e1", "node_type": "1", "metadata": {"window": ". . . . . . . . 13\n2.4 Results . ", "original_text": ". "}, "hash": "ec2be32330a7fa69a76635d58c2a806742a6bc82bbb60828586960027dbfa6b6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "00404da5-017b-45e1-9395-ce4bebd3c9e8", "node_type": "1", "metadata": {"window": ". . . . . . 13\n2.4 Results . . . ", "original_text": ". "}, "hash": "4e935d052048f31fb746a91439449eff4fc858dc7c35733e7716e1078cef18c2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "00404da5-017b-45e1-9395-ce4bebd3c9e8": {"__data__": {"id_": "00404da5-017b-45e1-9395-ce4bebd3c9e8", "embedding": null, "metadata": {"window": ". . . . . . 13\n2.4 Results . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fe632d2e-75ac-4dcf-b9ce-c2112e0d96b1", "node_type": "1", "metadata": {"window": ". . . . . . . 13\n2.4 Results . . ", "original_text": ". "}, "hash": "19937be2f0a5784209586333ffe52ffe7e36b4431e9d2cecd98e4113773e4a07", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a25272fc-ad0b-4ee8-98f4-d95344e83cca", "node_type": "1", "metadata": {"window": ". . . . . 13\n2.4 Results . . . . ", "original_text": ". "}, "hash": "e02c3c1832255ddd9cfd3a2ba3d512d29d09a17fe0450dcab4e1c28f7443d403", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a25272fc-ad0b-4ee8-98f4-d95344e83cca": {"__data__": {"id_": "a25272fc-ad0b-4ee8-98f4-d95344e83cca", "embedding": null, "metadata": {"window": ". . . . . 13\n2.4 Results . . . . 
", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "00404da5-017b-45e1-9395-ce4bebd3c9e8", "node_type": "1", "metadata": {"window": ". . . . . . 13\n2.4 Results . . . ", "original_text": ". "}, "hash": "4e935d052048f31fb746a91439449eff4fc858dc7c35733e7716e1078cef18c2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fd4e9c62-9120-4464-8fb6-bcf368f5e8d0", "node_type": "1", "metadata": {"window": ". . . . 13\n2.4 Results . . . . . ", "original_text": "13\n2.4 Results . "}, "hash": "6ae7b4ee3b818d02ccb05a16910fc558610d1706673747da738030856f80662d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fd4e9c62-9120-4464-8fb6-bcf368f5e8d0": {"__data__": {"id_": "fd4e9c62-9120-4464-8fb6-bcf368f5e8d0", "embedding": null, "metadata": {"window": ". . . . 13\n2.4 Results . . . . . ", "original_text": "13\n2.4 Results . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a25272fc-ad0b-4ee8-98f4-d95344e83cca", "node_type": "1", "metadata": {"window": ". . . . . 13\n2.4 Results . . . . ", "original_text": ". "}, "hash": "e02c3c1832255ddd9cfd3a2ba3d512d29d09a17fe0450dcab4e1c28f7443d403", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "955c63f1-3690-4d8e-85a1-b2be77dc98ce", "node_type": "1", "metadata": {"window": ". . . 13\n2.4 Results . . . . . . ", "original_text": ". "}, "hash": "fbfd95bc96f9b054b73e529defdd875e51377f1a313e0422213583de160f6efb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "13\n2.4 Results . ", "mimetype": "text/plain", "start_char_idx": 1071, "end_char_idx": 1088, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "955c63f1-3690-4d8e-85a1-b2be77dc98ce": {"__data__": {"id_": "955c63f1-3690-4d8e-85a1-b2be77dc98ce", "embedding": null, "metadata": {"window": ". . . 13\n2.4 Results . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fd4e9c62-9120-4464-8fb6-bcf368f5e8d0", "node_type": "1", "metadata": {"window": ". . . . 13\n2.4 Results . . . . . ", "original_text": "13\n2.4 Results . "}, "hash": "6ae7b4ee3b818d02ccb05a16910fc558610d1706673747da738030856f80662d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "babc6b05-6b92-4cec-aa10-fd20dc009ec9", "node_type": "1", "metadata": {"window": ". . 13\n2.4 Results . . . . . . . 
", "original_text": ". "}, "hash": "2a3569ac91e29f7b6c1ddc5cbd96168ff2615d91e126f879626fd1e5d0781614", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "babc6b05-6b92-4cec-aa10-fd20dc009ec9": {"__data__": {"id_": "babc6b05-6b92-4cec-aa10-fd20dc009ec9", "embedding": null, "metadata": {"window": ". . 13\n2.4 Results . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "955c63f1-3690-4d8e-85a1-b2be77dc98ce", "node_type": "1", "metadata": {"window": ". . . 13\n2.4 Results . . . . . . ", "original_text": ". "}, "hash": "fbfd95bc96f9b054b73e529defdd875e51377f1a313e0422213583de160f6efb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "53cba9d9-7973-4bc8-9080-f2510ef7bc36", "node_type": "1", "metadata": {"window": ". 13\n2.4 Results . . . . . . . . ", "original_text": ". "}, "hash": "7a52a5d8e659f5759ab0a898f8422a05a7ea48a63ea158b7a11288f6995e2135", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "53cba9d9-7973-4bc8-9080-f2510ef7bc36": {"__data__": {"id_": "53cba9d9-7973-4bc8-9080-f2510ef7bc36", "embedding": null, "metadata": {"window": ". 13\n2.4 Results . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "babc6b05-6b92-4cec-aa10-fd20dc009ec9", "node_type": "1", "metadata": {"window": ". . 13\n2.4 Results . . . . . . . ", "original_text": ". "}, "hash": "2a3569ac91e29f7b6c1ddc5cbd96168ff2615d91e126f879626fd1e5d0781614", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c9c3e7ff-2e2c-4db4-bfeb-cd7a787119ec", "node_type": "1", "metadata": {"window": "13\n2.4 Results . . . . . . . . . ", "original_text": ". "}, "hash": "be91fb1c561b394e986b4d0e4c99f5747e6f2e98957d79e868b8dbccde6a811c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c9c3e7ff-2e2c-4db4-bfeb-cd7a787119ec": {"__data__": {"id_": "c9c3e7ff-2e2c-4db4-bfeb-cd7a787119ec", "embedding": null, "metadata": {"window": "13\n2.4 Results . . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "53cba9d9-7973-4bc8-9080-f2510ef7bc36", "node_type": "1", "metadata": {"window": ". 13\n2.4 Results . . . . . . . . ", "original_text": ". "}, "hash": "7a52a5d8e659f5759ab0a898f8422a05a7ea48a63ea158b7a11288f6995e2135", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9e0bbc2d-6193-48cd-a07c-80474253322a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9e0bbc2d-6193-48cd-a07c-80474253322a": {"__data__": {"id_": "9e0bbc2d-6193-48cd-a07c-80474253322a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c9c3e7ff-2e2c-4db4-bfeb-cd7a787119ec", "node_type": "1", "metadata": {"window": "13\n2.4 Results . . . . . . . . . ", "original_text": ". "}, "hash": "be91fb1c561b394e986b4d0e4c99f5747e6f2e98957d79e868b8dbccde6a811c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3f194125-734d-46f4-a9fd-ddd29f5e2183", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3f194125-734d-46f4-a9fd-ddd29f5e2183": {"__data__": {"id_": "3f194125-734d-46f4-a9fd-ddd29f5e2183", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9e0bbc2d-6193-48cd-a07c-80474253322a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "be1ce379-c349-40ad-afdc-71a7936e60c6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "be1ce379-c349-40ad-afdc-71a7936e60c6": {"__data__": {"id_": "be1ce379-c349-40ad-afdc-71a7936e60c6", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3f194125-734d-46f4-a9fd-ddd29f5e2183", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c3296cd0-6365-4c5b-a6d0-411e8c3d0577", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c3296cd0-6365-4c5b-a6d0-411e8c3d0577": {"__data__": {"id_": "c3296cd0-6365-4c5b-a6d0-411e8c3d0577", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "be1ce379-c349-40ad-afdc-71a7936e60c6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "58aae4e0-a962-4b38-9a6e-a687ec00f30a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "58aae4e0-a962-4b38-9a6e-a687ec00f30a": {"__data__": {"id_": "58aae4e0-a962-4b38-9a6e-a687ec00f30a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c3296cd0-6365-4c5b-a6d0-411e8c3d0577", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "33fb6106-4d90-4c8e-a437-8a357e114dbd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "33fb6106-4d90-4c8e-a437-8a357e114dbd": {"__data__": {"id_": "33fb6106-4d90-4c8e-a437-8a357e114dbd", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "58aae4e0-a962-4b38-9a6e-a687ec00f30a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "616c86b6-7abc-4204-aaba-a811e22464f7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "616c86b6-7abc-4204-aaba-a811e22464f7": {"__data__": {"id_": "616c86b6-7abc-4204-aaba-a811e22464f7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "33fb6106-4d90-4c8e-a437-8a357e114dbd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "29cd9cd4-b945-4b31-a61b-a81974170bbf", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "29cd9cd4-b945-4b31-a61b-a81974170bbf": {"__data__": {"id_": "29cd9cd4-b945-4b31-a61b-a81974170bbf", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "616c86b6-7abc-4204-aaba-a811e22464f7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2aa81b72-9837-4a28-89c4-7263550df613", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2aa81b72-9837-4a28-89c4-7263550df613": {"__data__": {"id_": "2aa81b72-9837-4a28-89c4-7263550df613", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "29cd9cd4-b945-4b31-a61b-a81974170bbf", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4d89b50a-b72f-4cb0-87d8-8cf7b505ac9e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4d89b50a-b72f-4cb0-87d8-8cf7b505ac9e": {"__data__": {"id_": "4d89b50a-b72f-4cb0-87d8-8cf7b505ac9e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2aa81b72-9837-4a28-89c4-7263550df613", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4fba19b2-0cce-4256-9f0c-99191571e2d1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4fba19b2-0cce-4256-9f0c-99191571e2d1": {"__data__": {"id_": "4fba19b2-0cce-4256-9f0c-99191571e2d1", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4d89b50a-b72f-4cb0-87d8-8cf7b505ac9e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0634617f-9e21-4686-959c-80dd984f5d11", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0634617f-9e21-4686-959c-80dd984f5d11": {"__data__": {"id_": "0634617f-9e21-4686-959c-80dd984f5d11", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4fba19b2-0cce-4256-9f0c-99191571e2d1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ff12285a-3231-40f9-99be-c91d13278f97", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ff12285a-3231-40f9-99be-c91d13278f97": {"__data__": {"id_": "ff12285a-3231-40f9-99be-c91d13278f97", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0634617f-9e21-4686-959c-80dd984f5d11", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "29c922cc-48a1-416a-966f-a3ffd0edeee5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "29c922cc-48a1-416a-966f-a3ffd0edeee5": {"__data__": {"id_": "29c922cc-48a1-416a-966f-a3ffd0edeee5", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ff12285a-3231-40f9-99be-c91d13278f97", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "35072137-7803-4223-9b54-60db59633a2f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "35072137-7803-4223-9b54-60db59633a2f": {"__data__": {"id_": "35072137-7803-4223-9b54-60db59633a2f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "29c922cc-48a1-416a-966f-a3ffd0edeee5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "49faee0a-80fe-4fc7-a2be-8e51ff8d268c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "49faee0a-80fe-4fc7-a2be-8e51ff8d268c": {"__data__": {"id_": "49faee0a-80fe-4fc7-a2be-8e51ff8d268c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "35072137-7803-4223-9b54-60db59633a2f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2f5a8a63-1257-4665-9df1-b62bf40f4cd4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2f5a8a63-1257-4665-9df1-b62bf40f4cd4": {"__data__": {"id_": "2f5a8a63-1257-4665-9df1-b62bf40f4cd4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "49faee0a-80fe-4fc7-a2be-8e51ff8d268c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d55b5eff-093b-4db5-87ac-0becb7be3b3f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d55b5eff-093b-4db5-87ac-0becb7be3b3f": {"__data__": {"id_": "d55b5eff-093b-4db5-87ac-0becb7be3b3f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2f5a8a63-1257-4665-9df1-b62bf40f4cd4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c50c2988-5b65-4178-a57c-59a678975f91", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c50c2988-5b65-4178-a57c-59a678975f91": {"__data__": {"id_": "c50c2988-5b65-4178-a57c-59a678975f91", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d55b5eff-093b-4db5-87ac-0becb7be3b3f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5986e111-543e-41f6-b606-acd521d4d469", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5986e111-543e-41f6-b606-acd521d4d469": {"__data__": {"id_": "5986e111-543e-41f6-b606-acd521d4d469", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c50c2988-5b65-4178-a57c-59a678975f91", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "95604239-1db6-4289-8b4b-157830379b87", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "95604239-1db6-4289-8b4b-157830379b87": {"__data__": {"id_": "95604239-1db6-4289-8b4b-157830379b87", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5986e111-543e-41f6-b606-acd521d4d469", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "67666908-fe9d-4d3e-a628-b012f49263ad", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "67666908-fe9d-4d3e-a628-b012f49263ad": {"__data__": {"id_": "67666908-fe9d-4d3e-a628-b012f49263ad", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "95604239-1db6-4289-8b4b-157830379b87", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a0efd862-3924-4f9e-bb6e-1fa824ad1fe9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a0efd862-3924-4f9e-bb6e-1fa824ad1fe9": {"__data__": {"id_": "a0efd862-3924-4f9e-bb6e-1fa824ad1fe9", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "67666908-fe9d-4d3e-a628-b012f49263ad", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "351bfba7-c40f-4b24-936c-695c8922400b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "351bfba7-c40f-4b24-936c-695c8922400b": {"__data__": {"id_": "351bfba7-c40f-4b24-936c-695c8922400b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a0efd862-3924-4f9e-bb6e-1fa824ad1fe9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7cf7d9a4-6f59-47a5-9e0b-8101ec6ea48a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7cf7d9a4-6f59-47a5-9e0b-8101ec6ea48a": {"__data__": {"id_": "7cf7d9a4-6f59-47a5-9e0b-8101ec6ea48a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "351bfba7-c40f-4b24-936c-695c8922400b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "912e3efe-ce86-4e91-9af8-3699b294517a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "912e3efe-ce86-4e91-9af8-3699b294517a": {"__data__": {"id_": "912e3efe-ce86-4e91-9af8-3699b294517a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7cf7d9a4-6f59-47a5-9e0b-8101ec6ea48a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d9c1f9ec-ab20-4a1d-b35b-312c5299601f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d9c1f9ec-ab20-4a1d-b35b-312c5299601f": {"__data__": {"id_": "d9c1f9ec-ab20-4a1d-b35b-312c5299601f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "912e3efe-ce86-4e91-9af8-3699b294517a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e59e4445-a4bb-4179-bb7a-9137b7d7f70d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e59e4445-a4bb-4179-bb7a-9137b7d7f70d": {"__data__": {"id_": "e59e4445-a4bb-4179-bb7a-9137b7d7f70d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d9c1f9ec-ab20-4a1d-b35b-312c5299601f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "942783dd-0d25-47fb-82fe-d40113812f52", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "942783dd-0d25-47fb-82fe-d40113812f52": {"__data__": {"id_": "942783dd-0d25-47fb-82fe-d40113812f52", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e59e4445-a4bb-4179-bb7a-9137b7d7f70d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7c0d2760-f3e6-4bf8-9e60-0c29d165c69a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7c0d2760-f3e6-4bf8-9e60-0c29d165c69a": {"__data__": {"id_": "7c0d2760-f3e6-4bf8-9e60-0c29d165c69a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "942783dd-0d25-47fb-82fe-d40113812f52", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bcbea5b5-fef2-40cf-acb3-84e513365e95", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bcbea5b5-fef2-40cf-acb3-84e513365e95": {"__data__": {"id_": "bcbea5b5-fef2-40cf-acb3-84e513365e95", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7c0d2760-f3e6-4bf8-9e60-0c29d165c69a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ab830d4e-5aee-40e7-9563-705aa609ee32", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ab830d4e-5aee-40e7-9563-705aa609ee32": {"__data__": {"id_": "ab830d4e-5aee-40e7-9563-705aa609ee32", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bcbea5b5-fef2-40cf-acb3-84e513365e95", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4f9e84de-6297-417d-91fa-dd284764cb8c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4f9e84de-6297-417d-91fa-dd284764cb8c": {"__data__": {"id_": "4f9e84de-6297-417d-91fa-dd284764cb8c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ab830d4e-5aee-40e7-9563-705aa609ee32", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "13557eac-bdb8-4e98-b6c9-6e1502fd8977", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "13557eac-bdb8-4e98-b6c9-6e1502fd8977": {"__data__": {"id_": "13557eac-bdb8-4e98-b6c9-6e1502fd8977", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4f9e84de-6297-417d-91fa-dd284764cb8c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2d4a2448-bd3f-47ef-8120-61f108a3931d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2d4a2448-bd3f-47ef-8120-61f108a3931d": {"__data__": {"id_": "2d4a2448-bd3f-47ef-8120-61f108a3931d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "13557eac-bdb8-4e98-b6c9-6e1502fd8977", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "515f35bc-6805-4539-8dd6-c0bc8442c7df", "node_type": "1", "metadata": {"window": ". . . . . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . ", "original_text": ". "}, "hash": "93ab575239f30a383746dc6d679a5262965359773b53d0981d3049168a7a480e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "515f35bc-6805-4539-8dd6-c0bc8442c7df": {"__data__": {"id_": "515f35bc-6805-4539-8dd6-c0bc8442c7df", "embedding": null, "metadata": {"window": ". . . . . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2d4a2448-bd3f-47ef-8120-61f108a3931d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7d3b68a9-9684-45bd-b513-5355cc342493", "node_type": "1", "metadata": {"window": ". . . . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . ", "original_text": ". "}, "hash": "007b9ce5a595f827bbc91c8dd39d487e519a06f527aeb45083d70cb645c754b7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7d3b68a9-9684-45bd-b513-5355cc342493": {"__data__": {"id_": "7d3b68a9-9684-45bd-b513-5355cc342493", "embedding": null, "metadata": {"window": ". . . . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "515f35bc-6805-4539-8dd6-c0bc8442c7df", "node_type": "1", "metadata": {"window": ". . . . . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . ", "original_text": ". "}, "hash": "93ab575239f30a383746dc6d679a5262965359773b53d0981d3049168a7a480e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0df8094a-0fc9-46d2-bde9-db17da7e0798", "node_type": "1", "metadata": {"window": ". . . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . ", "original_text": ". "}, "hash": "968c63e03e2deb4aeeeaa8431afc6d87489e4ec8816a3e7149c3703eb629fb27", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0df8094a-0fc9-46d2-bde9-db17da7e0798": {"__data__": {"id_": "0df8094a-0fc9-46d2-bde9-db17da7e0798", "embedding": null, "metadata": {"window": ". . . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7d3b68a9-9684-45bd-b513-5355cc342493", "node_type": "1", "metadata": {"window": ". . . . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . ", "original_text": ". "}, "hash": "007b9ce5a595f827bbc91c8dd39d487e519a06f527aeb45083d70cb645c754b7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d200200a-c5dc-4197-a791-607008c88d14", "node_type": "1", "metadata": {"window": ". . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . ", "original_text": ". "}, "hash": "e48470f2fd6657e8a90eb30460b9bd267f9ff6d1ac0dc7d357e84dde65e87f20", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d200200a-c5dc-4197-a791-607008c88d14": {"__data__": {"id_": "d200200a-c5dc-4197-a791-607008c88d14", "embedding": null, "metadata": {"window": ". . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0df8094a-0fc9-46d2-bde9-db17da7e0798", "node_type": "1", "metadata": {"window": ". . . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . ", "original_text": ". "}, "hash": "968c63e03e2deb4aeeeaa8431afc6d87489e4ec8816a3e7149c3703eb629fb27", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ffd57124-4e66-446b-930a-d1498b742362", "node_type": "1", "metadata": {"window": ". . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . ", "original_text": "15\n3 Active Learning 19\n3.1 Measures of uncertainty . "}, "hash": "f9dd41e2e99f660916828d673c51d860279cdd3fb883d6c75890cc6660a9dcdb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ffd57124-4e66-446b-930a-d1498b742362": {"__data__": {"id_": "ffd57124-4e66-446b-930a-d1498b742362", "embedding": null, "metadata": {"window": ". . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . ", "original_text": "15\n3 Active Learning 19\n3.1 Measures of uncertainty . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d200200a-c5dc-4197-a791-607008c88d14", "node_type": "1", "metadata": {"window": ". . . . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . ", "original_text": ". "}, "hash": "e48470f2fd6657e8a90eb30460b9bd267f9ff6d1ac0dc7d357e84dde65e87f20", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "69f8add1-0f66-4bd8-9706-b9f8bac2b648", "node_type": "1", "metadata": {"window": ". . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . ", "original_text": ". "}, "hash": "ca1affcabe12bf8ee16121e8c5e3e5f3c875e641e754023a97d277b531ba1895", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "15\n3 Active Learning 19\n3.1 Measures of uncertainty . ", "mimetype": "text/plain", "start_char_idx": 1174, "end_char_idx": 1228, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "69f8add1-0f66-4bd8-9706-b9f8bac2b648": {"__data__": {"id_": "69f8add1-0f66-4bd8-9706-b9f8bac2b648", "embedding": null, "metadata": {"window": ". . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ffd57124-4e66-446b-930a-d1498b742362", "node_type": "1", "metadata": {"window": ". . . 
. 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . ", "original_text": "15\n3 Active Learning 19\n3.1 Measures of uncertainty . "}, "hash": "f9dd41e2e99f660916828d673c51d860279cdd3fb883d6c75890cc6660a9dcdb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "66d9162f-b0df-4d11-b4e6-232647daa677", "node_type": "1", "metadata": {"window": ". . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . . ", "original_text": ". "}, "hash": "28e1d705964971f413654c074d001401e4be584259c495e2c4c548e54d87f570", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "66d9162f-b0df-4d11-b4e6-232647daa677": {"__data__": {"id_": "66d9162f-b0df-4d11-b4e6-232647daa677", "embedding": null, "metadata": {"window": ". . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "69f8add1-0f66-4bd8-9706-b9f8bac2b648", "node_type": "1", "metadata": {"window": ". . . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . ", "original_text": ". "}, "hash": "ca1affcabe12bf8ee16121e8c5e3e5f3c875e641e754023a97d277b531ba1895", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "155ce6f9-70b8-4b9d-b137-36b9951b8036", "node_type": "1", "metadata": {"window": ". 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . . . ", "original_text": ". "}, "hash": "dbef2fe6910adeda589d03ce71ee6add5f37e1503324de6e3c4d928e2fd19b26", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "155ce6f9-70b8-4b9d-b137-36b9951b8036": {"__data__": {"id_": "155ce6f9-70b8-4b9d-b137-36b9951b8036", "embedding": null, "metadata": {"window": ". 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "66d9162f-b0df-4d11-b4e6-232647daa677", "node_type": "1", "metadata": {"window": ". . 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . . ", "original_text": ". "}, "hash": "28e1d705964971f413654c074d001401e4be584259c495e2c4c548e54d87f570", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b576c41e-e76f-46ca-b572-35158a29b8bf", "node_type": "1", "metadata": {"window": "15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . . . . ", "original_text": ". 
"}, "hash": "8c3e773a86884a93a8eb2ed8ba1fb8319cc3231068aec1487c6d0d5363dcf26b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b576c41e-e76f-46ca-b572-35158a29b8bf": {"__data__": {"id_": "b576c41e-e76f-46ca-b572-35158a29b8bf", "embedding": null, "metadata": {"window": "15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "155ce6f9-70b8-4b9d-b137-36b9951b8036", "node_type": "1", "metadata": {"window": ". 15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . . . ", "original_text": ". "}, "hash": "dbef2fe6910adeda589d03ce71ee6add5f37e1503324de6e3c4d928e2fd19b26", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d6a25275-5b62-492f-85af-622d4d555dd2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d6a25275-5b62-492f-85af-622d4d555dd2": {"__data__": {"id_": "d6a25275-5b62-492f-85af-622d4d555dd2", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b576c41e-e76f-46ca-b572-35158a29b8bf", "node_type": "1", "metadata": {"window": "15\n3 Active Learning 19\n3.1 Measures of uncertainty . . . . . . . . . ", "original_text": ". "}, "hash": "8c3e773a86884a93a8eb2ed8ba1fb8319cc3231068aec1487c6d0d5363dcf26b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2a75dea0-7dbe-4161-ae51-b5fe341a666b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2a75dea0-7dbe-4161-ae51-b5fe341a666b": {"__data__": {"id_": "2a75dea0-7dbe-4161-ae51-b5fe341a666b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d6a25275-5b62-492f-85af-622d4d555dd2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5aa5aaee-7a64-49a8-a060-ede2373823a3", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5aa5aaee-7a64-49a8-a060-ede2373823a3": {"__data__": {"id_": "5aa5aaee-7a64-49a8-a060-ede2373823a3", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2a75dea0-7dbe-4161-ae51-b5fe341a666b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ee0bec49-a86a-4348-888b-79d6fce5b11c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ee0bec49-a86a-4348-888b-79d6fce5b11c": {"__data__": {"id_": "ee0bec49-a86a-4348-888b-79d6fce5b11c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5aa5aaee-7a64-49a8-a060-ede2373823a3", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e4740a38-96fa-47ae-a521-176c04c48be9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e4740a38-96fa-47ae-a521-176c04c48be9": {"__data__": {"id_": "e4740a38-96fa-47ae-a521-176c04c48be9", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ee0bec49-a86a-4348-888b-79d6fce5b11c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5cdf0da4-1529-4a44-b8f2-c5d9ebf5f0fc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5cdf0da4-1529-4a44-b8f2-c5d9ebf5f0fc": {"__data__": {"id_": "5cdf0da4-1529-4a44-b8f2-c5d9ebf5f0fc", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e4740a38-96fa-47ae-a521-176c04c48be9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "869f1ddf-9fc3-4f1f-aa3e-9b811531a093", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "869f1ddf-9fc3-4f1f-aa3e-9b811531a093": {"__data__": {"id_": "869f1ddf-9fc3-4f1f-aa3e-9b811531a093", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5cdf0da4-1529-4a44-b8f2-c5d9ebf5f0fc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d2ea7cc0-8426-4708-af74-94d178188962", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d2ea7cc0-8426-4708-af74-94d178188962": {"__data__": {"id_": "d2ea7cc0-8426-4708-af74-94d178188962", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "869f1ddf-9fc3-4f1f-aa3e-9b811531a093", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1e745c01-a8ca-49ee-a307-aca3a88eeb62", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1e745c01-a8ca-49ee-a307-aca3a88eeb62": {"__data__": {"id_": "1e745c01-a8ca-49ee-a307-aca3a88eeb62", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d2ea7cc0-8426-4708-af74-94d178188962", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9055b746-7c49-483c-ac7d-1cbeedae37b7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9055b746-7c49-483c-ac7d-1cbeedae37b7": {"__data__": {"id_": "9055b746-7c49-483c-ac7d-1cbeedae37b7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1e745c01-a8ca-49ee-a307-aca3a88eeb62", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e3b851b9-8e1d-49e8-8bb8-9eaace3b36a3", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e3b851b9-8e1d-49e8-8bb8-9eaace3b36a3": {"__data__": {"id_": "e3b851b9-8e1d-49e8-8bb8-9eaace3b36a3", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9055b746-7c49-483c-ac7d-1cbeedae37b7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7006d86c-2845-4db9-8817-df0a7e5bb69f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7006d86c-2845-4db9-8817-df0a7e5bb69f": {"__data__": {"id_": "7006d86c-2845-4db9-8817-df0a7e5bb69f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e3b851b9-8e1d-49e8-8bb8-9eaace3b36a3", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f91eafaa-5a9f-466a-bc47-5f0ac4a3eb45", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f91eafaa-5a9f-466a-bc47-5f0ac4a3eb45": {"__data__": {"id_": "f91eafaa-5a9f-466a-bc47-5f0ac4a3eb45", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7006d86c-2845-4db9-8817-df0a7e5bb69f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2036e1f9-6e05-4fda-a8c8-d447badce37f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2036e1f9-6e05-4fda-a8c8-d447badce37f": {"__data__": {"id_": "2036e1f9-6e05-4fda-a8c8-d447badce37f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f91eafaa-5a9f-466a-bc47-5f0ac4a3eb45", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3ae182f1-c658-4a98-8cb9-ad409c89333a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3ae182f1-c658-4a98-8cb9-ad409c89333a": {"__data__": {"id_": "3ae182f1-c658-4a98-8cb9-ad409c89333a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2036e1f9-6e05-4fda-a8c8-d447badce37f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8fead42c-fede-4032-a21c-e015976afe68", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8fead42c-fede-4032-a21c-e015976afe68": {"__data__": {"id_": "8fead42c-fede-4032-a21c-e015976afe68", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3ae182f1-c658-4a98-8cb9-ad409c89333a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c9a603ef-68e8-4ae4-8b90-52c5af768453", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c9a603ef-68e8-4ae4-8b90-52c5af768453": {"__data__": {"id_": "c9a603ef-68e8-4ae4-8b90-52c5af768453", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8fead42c-fede-4032-a21c-e015976afe68", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ae816606-c3a5-4306-831e-946db4952968", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ae816606-c3a5-4306-831e-946db4952968": {"__data__": {"id_": "ae816606-c3a5-4306-831e-946db4952968", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c9a603ef-68e8-4ae4-8b90-52c5af768453", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7d4cd77c-5acd-4d9d-a5c0-095110f05de4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7d4cd77c-5acd-4d9d-a5c0-095110f05de4": {"__data__": {"id_": "7d4cd77c-5acd-4d9d-a5c0-095110f05de4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ae816606-c3a5-4306-831e-946db4952968", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4b5f0d66-e1d2-4ed6-a7f6-76c7baf52b09", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4b5f0d66-e1d2-4ed6-a7f6-76c7baf52b09": {"__data__": {"id_": "4b5f0d66-e1d2-4ed6-a7f6-76c7baf52b09", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7d4cd77c-5acd-4d9d-a5c0-095110f05de4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ae428e09-e434-4705-9982-207ed1ad74e6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ae428e09-e434-4705-9982-207ed1ad74e6": {"__data__": {"id_": "ae428e09-e434-4705-9982-207ed1ad74e6", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4b5f0d66-e1d2-4ed6-a7f6-76c7baf52b09", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6e221fb0-a26a-4695-a84a-823b7f3e46e2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6e221fb0-a26a-4695-a84a-823b7f3e46e2": {"__data__": {"id_": "6e221fb0-a26a-4695-a84a-823b7f3e46e2", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ae428e09-e434-4705-9982-207ed1ad74e6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d4fec36c-0dab-415b-97dc-9648b7597a47", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d4fec36c-0dab-415b-97dc-9648b7597a47": {"__data__": {"id_": "d4fec36c-0dab-415b-97dc-9648b7597a47", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6e221fb0-a26a-4695-a84a-823b7f3e46e2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "891dd5e8-bcb7-440d-9f34-c50f05fe5b8e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "891dd5e8-bcb7-440d-9f34-c50f05fe5b8e": {"__data__": {"id_": "891dd5e8-bcb7-440d-9f34-c50f05fe5b8e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d4fec36c-0dab-415b-97dc-9648b7597a47", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cdd9e12e-4f6b-4580-96e6-076a5400dd7c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cdd9e12e-4f6b-4580-96e6-076a5400dd7c": {"__data__": {"id_": "cdd9e12e-4f6b-4580-96e6-076a5400dd7c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "891dd5e8-bcb7-440d-9f34-c50f05fe5b8e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2f6fe182-a7b5-42ab-8590-08f808cce910", "node_type": "1", "metadata": {"window": ". . . . . . . . 21\n3.2 Ensemble configurations . ", "original_text": ". "}, "hash": "e497a4a5f9380bb1cad8892663da6d9ca308b8ed4aae07d24348fda6b585b98d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2f6fe182-a7b5-42ab-8590-08f808cce910": {"__data__": {"id_": "2f6fe182-a7b5-42ab-8590-08f808cce910", "embedding": null, "metadata": {"window": ". . . . . . . . 21\n3.2 Ensemble configurations . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cdd9e12e-4f6b-4580-96e6-076a5400dd7c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ce21bb77-3b5c-48a3-a0e7-bf3e91b62861", "node_type": "1", "metadata": {"window": ". . . . . . . 21\n3.2 Ensemble configurations . . ", "original_text": ". "}, "hash": "fd4df70e492eb72615caaca3c48191ea905fd04bd585d0fa078970a8d7598422", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ce21bb77-3b5c-48a3-a0e7-bf3e91b62861": {"__data__": {"id_": "ce21bb77-3b5c-48a3-a0e7-bf3e91b62861", "embedding": null, "metadata": {"window": ". . . . . . . 21\n3.2 Ensemble configurations . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2f6fe182-a7b5-42ab-8590-08f808cce910", "node_type": "1", "metadata": {"window": ". . . . . . . . 21\n3.2 Ensemble configurations . ", "original_text": ". "}, "hash": "e497a4a5f9380bb1cad8892663da6d9ca308b8ed4aae07d24348fda6b585b98d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7be05165-dc5c-495c-bdad-233b71645d77", "node_type": "1", "metadata": {"window": ". . . . . . 21\n3.2 Ensemble configurations . . . ", "original_text": ". "}, "hash": "15e3b8f010e513447a39a56452424ba9ddf2bb6b7fffa1248d4a561a24fd5108", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7be05165-dc5c-495c-bdad-233b71645d77": {"__data__": {"id_": "7be05165-dc5c-495c-bdad-233b71645d77", "embedding": null, "metadata": {"window": ". . . . . . 21\n3.2 Ensemble configurations . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ce21bb77-3b5c-48a3-a0e7-bf3e91b62861", "node_type": "1", "metadata": {"window": ". . . . . . . 21\n3.2 Ensemble configurations . . ", "original_text": ". "}, "hash": "fd4df70e492eb72615caaca3c48191ea905fd04bd585d0fa078970a8d7598422", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "415501cc-5c85-41d4-b498-139b1e5b59ea", "node_type": "1", "metadata": {"window": ". . . . . 21\n3.2 Ensemble configurations . . . 
. ", "original_text": ". "}, "hash": "d52d4282378dfdf2d74aa9753ff736cf871f1f76302f9beb88ef28051d11ad6b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "415501cc-5c85-41d4-b498-139b1e5b59ea": {"__data__": {"id_": "415501cc-5c85-41d4-b498-139b1e5b59ea", "embedding": null, "metadata": {"window": ". . . . . 21\n3.2 Ensemble configurations . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7be05165-dc5c-495c-bdad-233b71645d77", "node_type": "1", "metadata": {"window": ". . . . . . 21\n3.2 Ensemble configurations . . . ", "original_text": ". "}, "hash": "15e3b8f010e513447a39a56452424ba9ddf2bb6b7fffa1248d4a561a24fd5108", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dc0f2644-9886-4bd3-b613-4bf23c540d67", "node_type": "1", "metadata": {"window": ". . . . 21\n3.2 Ensemble configurations . . . . . ", "original_text": "21\n3.2 Ensemble configurations . "}, "hash": "5b7b131c8662ebb83f0508e04f5a6ff14f1f667d94d7b1743365bc25cdb22f23", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dc0f2644-9886-4bd3-b613-4bf23c540d67": {"__data__": {"id_": "dc0f2644-9886-4bd3-b613-4bf23c540d67", "embedding": null, "metadata": {"window": ". . . . 21\n3.2 Ensemble configurations . . . . . ", "original_text": "21\n3.2 Ensemble configurations . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "415501cc-5c85-41d4-b498-139b1e5b59ea", "node_type": "1", "metadata": {"window": ". . . . . 21\n3.2 Ensemble configurations . . . . ", "original_text": ". "}, "hash": "d52d4282378dfdf2d74aa9753ff736cf871f1f76302f9beb88ef28051d11ad6b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1ec46459-5948-429c-90a7-bba74d930e53", "node_type": "1", "metadata": {"window": ". . . 21\n3.2 Ensemble configurations . . . . . . ", "original_text": ". "}, "hash": "eaac8f050606db56ed7ec9d21d143d322570da530d1829ff2d112250168d2b6e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "21\n3.2 Ensemble configurations . ", "mimetype": "text/plain", "start_char_idx": 1294, "end_char_idx": 1327, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1ec46459-5948-429c-90a7-bba74d930e53": {"__data__": {"id_": "1ec46459-5948-429c-90a7-bba74d930e53", "embedding": null, "metadata": {"window": ". . . 21\n3.2 Ensemble configurations . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dc0f2644-9886-4bd3-b613-4bf23c540d67", "node_type": "1", "metadata": {"window": ". . . . 21\n3.2 Ensemble configurations . . . . . ", "original_text": "21\n3.2 Ensemble configurations . "}, "hash": "5b7b131c8662ebb83f0508e04f5a6ff14f1f667d94d7b1743365bc25cdb22f23", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "839a4b9c-5a25-4c58-9b36-1cda8b399b23", "node_type": "1", "metadata": {"window": ". . 21\n3.2 Ensemble configurations . . . . . . . ", "original_text": ". "}, "hash": "e15cfa0eecb7adf73cffc69e7c6c5f6959066b98554588ffe4df89a76367919e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "839a4b9c-5a25-4c58-9b36-1cda8b399b23": {"__data__": {"id_": "839a4b9c-5a25-4c58-9b36-1cda8b399b23", "embedding": null, "metadata": {"window": ". . 21\n3.2 Ensemble configurations . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1ec46459-5948-429c-90a7-bba74d930e53", "node_type": "1", "metadata": {"window": ". . . 21\n3.2 Ensemble configurations . . . . . . ", "original_text": ". "}, "hash": "eaac8f050606db56ed7ec9d21d143d322570da530d1829ff2d112250168d2b6e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "05eb5f60-62d6-4053-ad8e-a59f7feccfa5", "node_type": "1", "metadata": {"window": ". 21\n3.2 Ensemble configurations . . . . . . . . ", "original_text": ". "}, "hash": "8e92698c8cbf7b5b61673affccf56644ea80964c52ec7873f46a2542d3fc2975", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "05eb5f60-62d6-4053-ad8e-a59f7feccfa5": {"__data__": {"id_": "05eb5f60-62d6-4053-ad8e-a59f7feccfa5", "embedding": null, "metadata": {"window": ". 21\n3.2 Ensemble configurations . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "839a4b9c-5a25-4c58-9b36-1cda8b399b23", "node_type": "1", "metadata": {"window": ". . 21\n3.2 Ensemble configurations . . . . . . . ", "original_text": ". 
"}, "hash": "e15cfa0eecb7adf73cffc69e7c6c5f6959066b98554588ffe4df89a76367919e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a6439e08-9f9f-4a55-9322-a745e30da0e0", "node_type": "1", "metadata": {"window": "21\n3.2 Ensemble configurations . . . . . . . . . ", "original_text": ". "}, "hash": "505a3753458d8f1f7f52faf8580b4d300585a22277f19358e8471e819aaba168", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a6439e08-9f9f-4a55-9322-a745e30da0e0": {"__data__": {"id_": "a6439e08-9f9f-4a55-9322-a745e30da0e0", "embedding": null, "metadata": {"window": "21\n3.2 Ensemble configurations . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "05eb5f60-62d6-4053-ad8e-a59f7feccfa5", "node_type": "1", "metadata": {"window": ". 21\n3.2 Ensemble configurations . . . . . . . . ", "original_text": ". "}, "hash": "8e92698c8cbf7b5b61673affccf56644ea80964c52ec7873f46a2542d3fc2975", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dce9210d-fb37-4476-b0b2-78c6f20831bc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dce9210d-fb37-4476-b0b2-78c6f20831bc": {"__data__": {"id_": "dce9210d-fb37-4476-b0b2-78c6f20831bc", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a6439e08-9f9f-4a55-9322-a745e30da0e0", "node_type": "1", "metadata": {"window": "21\n3.2 Ensemble configurations . . . . . . . . . ", "original_text": ". "}, "hash": "505a3753458d8f1f7f52faf8580b4d300585a22277f19358e8471e819aaba168", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fedc3f76-f24c-488a-8d16-f1d72ab3c4fb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fedc3f76-f24c-488a-8d16-f1d72ab3c4fb": {"__data__": {"id_": "fedc3f76-f24c-488a-8d16-f1d72ab3c4fb", "embedding": null, "metadata": {"window": ". . . . . . . . . 
", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dce9210d-fb37-4476-b0b2-78c6f20831bc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "255f44fe-4ddb-42d6-af44-4ee3e484b8e2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "255f44fe-4ddb-42d6-af44-4ee3e484b8e2": {"__data__": {"id_": "255f44fe-4ddb-42d6-af44-4ee3e484b8e2", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fedc3f76-f24c-488a-8d16-f1d72ab3c4fb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "110eeee4-be40-488e-9482-bd4d6d425399", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "110eeee4-be40-488e-9482-bd4d6d425399": {"__data__": {"id_": "110eeee4-be40-488e-9482-bd4d6d425399", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "255f44fe-4ddb-42d6-af44-4ee3e484b8e2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dfb31c8e-9ed6-4981-b84b-e61e33472f5d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dfb31c8e-9ed6-4981-b84b-e61e33472f5d": {"__data__": {"id_": "dfb31c8e-9ed6-4981-b84b-e61e33472f5d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "110eeee4-be40-488e-9482-bd4d6d425399", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4967230d-0a80-49a7-b7be-0dce5a9df235", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4967230d-0a80-49a7-b7be-0dce5a9df235": {"__data__": {"id_": "4967230d-0a80-49a7-b7be-0dce5a9df235", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dfb31c8e-9ed6-4981-b84b-e61e33472f5d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2bb79a49-347f-46fa-97bd-a7665ba268a4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2bb79a49-347f-46fa-97bd-a7665ba268a4": {"__data__": {"id_": "2bb79a49-347f-46fa-97bd-a7665ba268a4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4967230d-0a80-49a7-b7be-0dce5a9df235", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3614c960-f31a-41c0-822a-58b78a90fa8b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3614c960-f31a-41c0-822a-58b78a90fa8b": {"__data__": {"id_": "3614c960-f31a-41c0-822a-58b78a90fa8b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2bb79a49-347f-46fa-97bd-a7665ba268a4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "96f5db1d-6112-4a0f-8d59-bf99cea35d8a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "96f5db1d-6112-4a0f-8d59-bf99cea35d8a": {"__data__": {"id_": "96f5db1d-6112-4a0f-8d59-bf99cea35d8a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3614c960-f31a-41c0-822a-58b78a90fa8b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3256e618-3a73-483f-9a7f-42dd04087711", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3256e618-3a73-483f-9a7f-42dd04087711": {"__data__": {"id_": "3256e618-3a73-483f-9a7f-42dd04087711", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "96f5db1d-6112-4a0f-8d59-bf99cea35d8a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4b8852e3-782a-45cf-b088-58a1b924c43f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4b8852e3-782a-45cf-b088-58a1b924c43f": {"__data__": {"id_": "4b8852e3-782a-45cf-b088-58a1b924c43f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3256e618-3a73-483f-9a7f-42dd04087711", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6d367467-67f1-4367-9aeb-564f16645f66", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6d367467-67f1-4367-9aeb-564f16645f66": {"__data__": {"id_": "6d367467-67f1-4367-9aeb-564f16645f66", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4b8852e3-782a-45cf-b088-58a1b924c43f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "29bd39e9-f988-4835-b452-655d140b8d61", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "29bd39e9-f988-4835-b452-655d140b8d61": {"__data__": {"id_": "29bd39e9-f988-4835-b452-655d140b8d61", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6d367467-67f1-4367-9aeb-564f16645f66", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4a03c3f0-95a6-4e06-856f-21c6a5ff98a7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4a03c3f0-95a6-4e06-856f-21c6a5ff98a7": {"__data__": {"id_": "4a03c3f0-95a6-4e06-856f-21c6a5ff98a7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "29bd39e9-f988-4835-b452-655d140b8d61", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4c2dae3b-4994-488a-a8c4-dfc64b97ff0c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4c2dae3b-4994-488a-a8c4-dfc64b97ff0c": {"__data__": {"id_": "4c2dae3b-4994-488a-a8c4-dfc64b97ff0c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4a03c3f0-95a6-4e06-856f-21c6a5ff98a7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5fdd60bb-42b4-4a39-a711-8beccccf373f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5fdd60bb-42b4-4a39-a711-8beccccf373f": {"__data__": {"id_": "5fdd60bb-42b4-4a39-a711-8beccccf373f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4c2dae3b-4994-488a-a8c4-dfc64b97ff0c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "979e6a6b-efea-41ca-a7c9-9668f04fbcdd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "979e6a6b-efea-41ca-a7c9-9668f04fbcdd": {"__data__": {"id_": "979e6a6b-efea-41ca-a7c9-9668f04fbcdd", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5fdd60bb-42b4-4a39-a711-8beccccf373f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a90ad3d1-bc04-4489-a049-5c713d52d173", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a90ad3d1-bc04-4489-a049-5c713d52d173": {"__data__": {"id_": "a90ad3d1-bc04-4489-a049-5c713d52d173", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "979e6a6b-efea-41ca-a7c9-9668f04fbcdd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c3b62386-aba7-4f94-9000-7f1a28d2ebab", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c3b62386-aba7-4f94-9000-7f1a28d2ebab": {"__data__": {"id_": "c3b62386-aba7-4f94-9000-7f1a28d2ebab", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a90ad3d1-bc04-4489-a049-5c713d52d173", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e5ea3efe-cc55-4008-a4bf-bd0dbb048e94", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e5ea3efe-cc55-4008-a4bf-bd0dbb048e94": {"__data__": {"id_": "e5ea3efe-cc55-4008-a4bf-bd0dbb048e94", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c3b62386-aba7-4f94-9000-7f1a28d2ebab", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f1af395a-2de9-4ead-8640-5d061341b348", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f1af395a-2de9-4ead-8640-5d061341b348": {"__data__": {"id_": "f1af395a-2de9-4ead-8640-5d061341b348", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e5ea3efe-cc55-4008-a4bf-bd0dbb048e94", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1cab2a1b-a166-4690-b494-ec86a00e79c8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1cab2a1b-a166-4690-b494-ec86a00e79c8": {"__data__": {"id_": "1cab2a1b-a166-4690-b494-ec86a00e79c8", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f1af395a-2de9-4ead-8640-5d061341b348", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c0e635e7-5e5a-4090-9806-04f1987465a4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c0e635e7-5e5a-4090-9806-04f1987465a4": {"__data__": {"id_": "c0e635e7-5e5a-4090-9806-04f1987465a4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1cab2a1b-a166-4690-b494-ec86a00e79c8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f00071e5-7b83-466a-8454-e7d253f262dc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f00071e5-7b83-466a-8454-e7d253f262dc": {"__data__": {"id_": "f00071e5-7b83-466a-8454-e7d253f262dc", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c0e635e7-5e5a-4090-9806-04f1987465a4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9d1ab84e-9be0-459c-8305-c9353a13e749", "node_type": "1", "metadata": {"window": ". . . . . . . . 22\n3.3 Methods . ", "original_text": ". "}, "hash": "88c5837f5b6c481cfb17c85696c1cce11ffa5f33031a3a3f2ffc20fb120e36e0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9d1ab84e-9be0-459c-8305-c9353a13e749": {"__data__": {"id_": "9d1ab84e-9be0-459c-8305-c9353a13e749", "embedding": null, "metadata": {"window": ". . . . . . . . 22\n3.3 Methods . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f00071e5-7b83-466a-8454-e7d253f262dc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "403511af-cbfe-4c0f-948d-6a9ecf7ae6ec", "node_type": "1", "metadata": {"window": ". . . . . . . 22\n3.3 Methods . . ", "original_text": ". 
"}, "hash": "4e6832dbb03387a9a5c5012863318cc6ec1afba3303717dc672bb3eb4223836c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "403511af-cbfe-4c0f-948d-6a9ecf7ae6ec": {"__data__": {"id_": "403511af-cbfe-4c0f-948d-6a9ecf7ae6ec", "embedding": null, "metadata": {"window": ". . . . . . . 22\n3.3 Methods . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9d1ab84e-9be0-459c-8305-c9353a13e749", "node_type": "1", "metadata": {"window": ". . . . . . . . 22\n3.3 Methods . ", "original_text": ". "}, "hash": "88c5837f5b6c481cfb17c85696c1cce11ffa5f33031a3a3f2ffc20fb120e36e0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9aca5a0d-bfa0-4caa-887c-829d210fc9e4", "node_type": "1", "metadata": {"window": ". . . . . . 22\n3.3 Methods . . . ", "original_text": ". "}, "hash": "a2db21686a66e4ffa5784f0be861b8479d3ed7e10da68f353934a22a18b6917c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9aca5a0d-bfa0-4caa-887c-829d210fc9e4": {"__data__": {"id_": "9aca5a0d-bfa0-4caa-887c-829d210fc9e4", "embedding": null, "metadata": {"window": ". . . . . . 22\n3.3 Methods . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "403511af-cbfe-4c0f-948d-6a9ecf7ae6ec", "node_type": "1", "metadata": {"window": ". . . . . . . 22\n3.3 Methods . . ", "original_text": ". "}, "hash": "4e6832dbb03387a9a5c5012863318cc6ec1afba3303717dc672bb3eb4223836c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a093a811-9296-40fd-ae21-bb64d1c3926a", "node_type": "1", "metadata": {"window": ". . . . . 22\n3.3 Methods . . . . ", "original_text": ". "}, "hash": "800ed23bcc6b441a1d9f68c88a7296142f36a3c18b036212ad0bf94f5ac7de33", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a093a811-9296-40fd-ae21-bb64d1c3926a": {"__data__": {"id_": "a093a811-9296-40fd-ae21-bb64d1c3926a", "embedding": null, "metadata": {"window": ". . . . . 22\n3.3 Methods . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9aca5a0d-bfa0-4caa-887c-829d210fc9e4", "node_type": "1", "metadata": {"window": ". . . . . . 22\n3.3 Methods . . . ", "original_text": ". "}, "hash": "a2db21686a66e4ffa5784f0be861b8479d3ed7e10da68f353934a22a18b6917c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3da0aa0a-6c63-496b-8464-917ae9813f4d", "node_type": "1", "metadata": {"window": ". . . . 22\n3.3 Methods . . . . . ", "original_text": "22\n3.3 Methods . "}, "hash": "b0a7d9900843dad2305e5b95260da53fbb1fac93158bb538bda0ef58da4cfee4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3da0aa0a-6c63-496b-8464-917ae9813f4d": {"__data__": {"id_": "3da0aa0a-6c63-496b-8464-917ae9813f4d", "embedding": null, "metadata": {"window": ". . . . 22\n3.3 Methods . . . . . ", "original_text": "22\n3.3 Methods . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a093a811-9296-40fd-ae21-bb64d1c3926a", "node_type": "1", "metadata": {"window": ". . . . . 22\n3.3 Methods . . . . ", "original_text": ". "}, "hash": "800ed23bcc6b441a1d9f68c88a7296142f36a3c18b036212ad0bf94f5ac7de33", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3ae4bc93-ad47-47fe-9487-34b3926c9cc1", "node_type": "1", "metadata": {"window": ". . . 22\n3.3 Methods . . . . . . ", "original_text": ". "}, "hash": "d8387aab8c9910b78423d6d5409ae095327b112175383f8dc8366b967ac8d305", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "22\n3.3 Methods . ", "mimetype": "text/plain", "start_char_idx": 1391, "end_char_idx": 1408, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3ae4bc93-ad47-47fe-9487-34b3926c9cc1": {"__data__": {"id_": "3ae4bc93-ad47-47fe-9487-34b3926c9cc1", "embedding": null, "metadata": {"window": ". . . 22\n3.3 Methods . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3da0aa0a-6c63-496b-8464-917ae9813f4d", "node_type": "1", "metadata": {"window": ". . . . 22\n3.3 Methods . . . . . ", "original_text": "22\n3.3 Methods . "}, "hash": "b0a7d9900843dad2305e5b95260da53fbb1fac93158bb538bda0ef58da4cfee4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b91fd9b6-be6b-403e-a1ae-e2ff2b383e0a", "node_type": "1", "metadata": {"window": ". . 22\n3.3 Methods . . . . . . . ", "original_text": ". 
"}, "hash": "440bd65fe62a37d8cd2ad836e7e1555e1537327413db1b4e204819d304bc5474", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b91fd9b6-be6b-403e-a1ae-e2ff2b383e0a": {"__data__": {"id_": "b91fd9b6-be6b-403e-a1ae-e2ff2b383e0a", "embedding": null, "metadata": {"window": ". . 22\n3.3 Methods . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3ae4bc93-ad47-47fe-9487-34b3926c9cc1", "node_type": "1", "metadata": {"window": ". . . 22\n3.3 Methods . . . . . . ", "original_text": ". "}, "hash": "d8387aab8c9910b78423d6d5409ae095327b112175383f8dc8366b967ac8d305", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "53f66baa-e165-4c00-9798-78b2af372b1f", "node_type": "1", "metadata": {"window": ". 22\n3.3 Methods . . . . . . . . ", "original_text": ". "}, "hash": "15a269acfd6578ba723433f4b3fbe1ac4bb70aba8f28e139d054ff4bfb572a11", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "53f66baa-e165-4c00-9798-78b2af372b1f": {"__data__": {"id_": "53f66baa-e165-4c00-9798-78b2af372b1f", "embedding": null, "metadata": {"window": ". 22\n3.3 Methods . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b91fd9b6-be6b-403e-a1ae-e2ff2b383e0a", "node_type": "1", "metadata": {"window": ". . 22\n3.3 Methods . . . . . . . ", "original_text": ". "}, "hash": "440bd65fe62a37d8cd2ad836e7e1555e1537327413db1b4e204819d304bc5474", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "af635190-17ea-4bea-9a09-53fc3fa5cde0", "node_type": "1", "metadata": {"window": "22\n3.3 Methods . . . . . . . . . ", "original_text": ". "}, "hash": "957e64c2665a501e46f4ce0bedc0ad9e17050929f444dba7eb864aac55ab6d2d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "af635190-17ea-4bea-9a09-53fc3fa5cde0": {"__data__": {"id_": "af635190-17ea-4bea-9a09-53fc3fa5cde0", "embedding": null, "metadata": {"window": "22\n3.3 Methods . . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "53f66baa-e165-4c00-9798-78b2af372b1f", "node_type": "1", "metadata": {"window": ". 22\n3.3 Methods . . . . . . . . ", "original_text": ". "}, "hash": "15a269acfd6578ba723433f4b3fbe1ac4bb70aba8f28e139d054ff4bfb572a11", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "21c4379f-21a1-49d0-91b0-d3bba46ea2f1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "21c4379f-21a1-49d0-91b0-d3bba46ea2f1": {"__data__": {"id_": "21c4379f-21a1-49d0-91b0-d3bba46ea2f1", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "af635190-17ea-4bea-9a09-53fc3fa5cde0", "node_type": "1", "metadata": {"window": "22\n3.3 Methods . . . . . . . . . ", "original_text": ". "}, "hash": "957e64c2665a501e46f4ce0bedc0ad9e17050929f444dba7eb864aac55ab6d2d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "81137b75-4255-4120-bb72-1ae7efa1325b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "81137b75-4255-4120-bb72-1ae7efa1325b": {"__data__": {"id_": "81137b75-4255-4120-bb72-1ae7efa1325b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "21c4379f-21a1-49d0-91b0-d3bba46ea2f1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9c157a5b-ebcb-4af1-b248-ec80a7ce3ba4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9c157a5b-ebcb-4af1-b248-ec80a7ce3ba4": {"__data__": {"id_": "9c157a5b-ebcb-4af1-b248-ec80a7ce3ba4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "81137b75-4255-4120-bb72-1ae7efa1325b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d46f79da-9279-46dc-9a5f-f5a3deec2845", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d46f79da-9279-46dc-9a5f-f5a3deec2845": {"__data__": {"id_": "d46f79da-9279-46dc-9a5f-f5a3deec2845", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9c157a5b-ebcb-4af1-b248-ec80a7ce3ba4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "730acaec-9cd5-438d-83a3-906f6fac6c1a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "730acaec-9cd5-438d-83a3-906f6fac6c1a": {"__data__": {"id_": "730acaec-9cd5-438d-83a3-906f6fac6c1a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d46f79da-9279-46dc-9a5f-f5a3deec2845", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1f260bbc-837b-446f-906d-401583250bb8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1f260bbc-837b-446f-906d-401583250bb8": {"__data__": {"id_": "1f260bbc-837b-446f-906d-401583250bb8", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "730acaec-9cd5-438d-83a3-906f6fac6c1a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6c0c9484-6d53-456e-a2c2-36be82b612f2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6c0c9484-6d53-456e-a2c2-36be82b612f2": {"__data__": {"id_": "6c0c9484-6d53-456e-a2c2-36be82b612f2", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1f260bbc-837b-446f-906d-401583250bb8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f88cf895-0e45-4513-97d7-bf3006ec340a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f88cf895-0e45-4513-97d7-bf3006ec340a": {"__data__": {"id_": "f88cf895-0e45-4513-97d7-bf3006ec340a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6c0c9484-6d53-456e-a2c2-36be82b612f2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b93cc96d-a274-4302-9907-e1a8740a8550", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b93cc96d-a274-4302-9907-e1a8740a8550": {"__data__": {"id_": "b93cc96d-a274-4302-9907-e1a8740a8550", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f88cf895-0e45-4513-97d7-bf3006ec340a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "496a8931-174b-4ec1-b4d0-8240115aa89c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "496a8931-174b-4ec1-b4d0-8240115aa89c": {"__data__": {"id_": "496a8931-174b-4ec1-b4d0-8240115aa89c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b93cc96d-a274-4302-9907-e1a8740a8550", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7781f9a9-0ad8-430d-8953-c9a8dcd59017", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7781f9a9-0ad8-430d-8953-c9a8dcd59017": {"__data__": {"id_": "7781f9a9-0ad8-430d-8953-c9a8dcd59017", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "496a8931-174b-4ec1-b4d0-8240115aa89c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "86db7322-ebb3-4d85-936c-bf9e304bd9ea", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "86db7322-ebb3-4d85-936c-bf9e304bd9ea": {"__data__": {"id_": "86db7322-ebb3-4d85-936c-bf9e304bd9ea", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7781f9a9-0ad8-430d-8953-c9a8dcd59017", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6e08f6c5-1c1a-49b5-93db-fa0d798ff82c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6e08f6c5-1c1a-49b5-93db-fa0d798ff82c": {"__data__": {"id_": "6e08f6c5-1c1a-49b5-93db-fa0d798ff82c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "86db7322-ebb3-4d85-936c-bf9e304bd9ea", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c8393f43-baa5-4c12-b51f-37ba84411a70", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c8393f43-baa5-4c12-b51f-37ba84411a70": {"__data__": {"id_": "c8393f43-baa5-4c12-b51f-37ba84411a70", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6e08f6c5-1c1a-49b5-93db-fa0d798ff82c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bab49fba-279d-4ddf-99d1-6706521a71a5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bab49fba-279d-4ddf-99d1-6706521a71a5": {"__data__": {"id_": "bab49fba-279d-4ddf-99d1-6706521a71a5", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c8393f43-baa5-4c12-b51f-37ba84411a70", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "77280196-f248-4806-b532-4e77f0d5125b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "77280196-f248-4806-b532-4e77f0d5125b": {"__data__": {"id_": "77280196-f248-4806-b532-4e77f0d5125b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bab49fba-279d-4ddf-99d1-6706521a71a5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "946226d0-b703-4cd6-a543-3ec9a1c0454d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "946226d0-b703-4cd6-a543-3ec9a1c0454d": {"__data__": {"id_": "946226d0-b703-4cd6-a543-3ec9a1c0454d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "77280196-f248-4806-b532-4e77f0d5125b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "81a99583-c412-4cbf-b317-4575c17cff32", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "81a99583-c412-4cbf-b317-4575c17cff32": {"__data__": {"id_": "81a99583-c412-4cbf-b317-4575c17cff32", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "946226d0-b703-4cd6-a543-3ec9a1c0454d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f1dfcc71-ce5a-485d-95e1-c39f11ad469a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f1dfcc71-ce5a-485d-95e1-c39f11ad469a": {"__data__": {"id_": "f1dfcc71-ce5a-485d-95e1-c39f11ad469a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "81a99583-c412-4cbf-b317-4575c17cff32", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "09fb64f1-efb2-446a-9970-76715defe156", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "09fb64f1-efb2-446a-9970-76715defe156": {"__data__": {"id_": "09fb64f1-efb2-446a-9970-76715defe156", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f1dfcc71-ce5a-485d-95e1-c39f11ad469a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "94893e9c-a839-4894-a278-be34a33dbe47", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "94893e9c-a839-4894-a278-be34a33dbe47": {"__data__": {"id_": "94893e9c-a839-4894-a278-be34a33dbe47", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "09fb64f1-efb2-446a-9970-76715defe156", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "54d5a007-6cca-46c6-a9d6-1c883f484a9f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "54d5a007-6cca-46c6-a9d6-1c883f484a9f": {"__data__": {"id_": "54d5a007-6cca-46c6-a9d6-1c883f484a9f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "94893e9c-a839-4894-a278-be34a33dbe47", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "25c44ae7-fdf8-4ce7-9e9b-e1a8dc7ee78d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "25c44ae7-fdf8-4ce7-9e9b-e1a8dc7ee78d": {"__data__": {"id_": "25c44ae7-fdf8-4ce7-9e9b-e1a8dc7ee78d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "54d5a007-6cca-46c6-a9d6-1c883f484a9f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6cbc6819-3e97-4906-8796-87e467f514b9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6cbc6819-3e97-4906-8796-87e467f514b9": {"__data__": {"id_": "6cbc6819-3e97-4906-8796-87e467f514b9", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "25c44ae7-fdf8-4ce7-9e9b-e1a8dc7ee78d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "092ce679-7ef7-441d-aeb5-ce1821c9d7b1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "092ce679-7ef7-441d-aeb5-ce1821c9d7b1": {"__data__": {"id_": "092ce679-7ef7-441d-aeb5-ce1821c9d7b1", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6cbc6819-3e97-4906-8796-87e467f514b9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9edf7264-654d-4cee-9cda-9c5181aea82e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9edf7264-654d-4cee-9cda-9c5181aea82e": {"__data__": {"id_": "9edf7264-654d-4cee-9cda-9c5181aea82e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "092ce679-7ef7-441d-aeb5-ce1821c9d7b1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9e0ad855-0f6f-4517-ba5a-6601e91c61c5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9e0ad855-0f6f-4517-ba5a-6601e91c61c5": {"__data__": {"id_": "9e0ad855-0f6f-4517-ba5a-6601e91c61c5", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9edf7264-654d-4cee-9cda-9c5181aea82e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a3b6b725-aff0-4f13-aef5-d15c79f568ba", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a3b6b725-aff0-4f13-aef5-d15c79f568ba": {"__data__": {"id_": "a3b6b725-aff0-4f13-aef5-d15c79f568ba", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9e0ad855-0f6f-4517-ba5a-6601e91c61c5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5129a96e-3298-46f8-af07-6aa5354359b7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5129a96e-3298-46f8-af07-6aa5354359b7": {"__data__": {"id_": "5129a96e-3298-46f8-af07-6aa5354359b7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a3b6b725-aff0-4f13-aef5-d15c79f568ba", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e20345d0-f37a-47b3-a84d-5870f8ed09eb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e20345d0-f37a-47b3-a84d-5870f8ed09eb": {"__data__": {"id_": "e20345d0-f37a-47b3-a84d-5870f8ed09eb", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5129a96e-3298-46f8-af07-6aa5354359b7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4b889ce3-2f21-4aef-9699-6d9017f20a4a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4b889ce3-2f21-4aef-9699-6d9017f20a4a": {"__data__": {"id_": "4b889ce3-2f21-4aef-9699-6d9017f20a4a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e20345d0-f37a-47b3-a84d-5870f8ed09eb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ac5edc31-3f0a-4263-b238-52ba25d2428f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ac5edc31-3f0a-4263-b238-52ba25d2428f": {"__data__": {"id_": "ac5edc31-3f0a-4263-b238-52ba25d2428f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4b889ce3-2f21-4aef-9699-6d9017f20a4a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bade3cdc-cbad-496e-92bb-6aabafd99284", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bade3cdc-cbad-496e-92bb-6aabafd99284": {"__data__": {"id_": "bade3cdc-cbad-496e-92bb-6aabafd99284", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ac5edc31-3f0a-4263-b238-52ba25d2428f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0b30648f-be34-4884-8ea0-279f164999cb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0b30648f-be34-4884-8ea0-279f164999cb": {"__data__": {"id_": "0b30648f-be34-4884-8ea0-279f164999cb", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bade3cdc-cbad-496e-92bb-6aabafd99284", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e54e6309-4eea-4571-ac63-383e7b7d0e6e", "node_type": "1", "metadata": {"window": ". . . . . . . . 23\n3.4 Results . ", "original_text": ". "}, "hash": "bdb506645c0a9b0dc1599018641dd836d5d3071d2742c0347108f70b587c3059", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e54e6309-4eea-4571-ac63-383e7b7d0e6e": {"__data__": {"id_": "e54e6309-4eea-4571-ac63-383e7b7d0e6e", "embedding": null, "metadata": {"window": ". . . . . . . . 23\n3.4 Results . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0b30648f-be34-4884-8ea0-279f164999cb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9d380c2f-0971-4951-a89b-a02488b1c89a", "node_type": "1", "metadata": {"window": ". . . . . . . 23\n3.4 Results . . ", "original_text": ". "}, "hash": "f582e3d1f617ebe3050f40746ea5e0d4d12b563deaf385e7f36e441cf1b2f10a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9d380c2f-0971-4951-a89b-a02488b1c89a": {"__data__": {"id_": "9d380c2f-0971-4951-a89b-a02488b1c89a", "embedding": null, "metadata": {"window": ". . . . . . . 23\n3.4 Results . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e54e6309-4eea-4571-ac63-383e7b7d0e6e", "node_type": "1", "metadata": {"window": ". . . . . . . . 23\n3.4 Results . ", "original_text": ". "}, "hash": "bdb506645c0a9b0dc1599018641dd836d5d3071d2742c0347108f70b587c3059", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "71fb7290-95e8-44d9-a3af-772e78899ad9", "node_type": "1", "metadata": {"window": ". . . . . . 23\n3.4 Results . . . ", "original_text": ". "}, "hash": "225e4bc7415326b5bb154eae7892a497656c730c7e5d971c812180d4b1ca5b8d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "71fb7290-95e8-44d9-a3af-772e78899ad9": {"__data__": {"id_": "71fb7290-95e8-44d9-a3af-772e78899ad9", "embedding": null, "metadata": {"window": ". . . . . . 23\n3.4 Results . . . 
", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9d380c2f-0971-4951-a89b-a02488b1c89a", "node_type": "1", "metadata": {"window": ". . . . . . . 23\n3.4 Results . . ", "original_text": ". "}, "hash": "f582e3d1f617ebe3050f40746ea5e0d4d12b563deaf385e7f36e441cf1b2f10a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7e25fb1b-ec22-43e9-8955-2848f47cc53a", "node_type": "1", "metadata": {"window": ". . . . . 23\n3.4 Results . . . . ", "original_text": ". "}, "hash": "6f61a32f4b3e1d36cd0fa90370bf3e45d684dedb2720640730069fb50475573d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7e25fb1b-ec22-43e9-8955-2848f47cc53a": {"__data__": {"id_": "7e25fb1b-ec22-43e9-8955-2848f47cc53a", "embedding": null, "metadata": {"window": ". . . . . 23\n3.4 Results . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "71fb7290-95e8-44d9-a3af-772e78899ad9", "node_type": "1", "metadata": {"window": ". . . . . . 23\n3.4 Results . . . ", "original_text": ". "}, "hash": "225e4bc7415326b5bb154eae7892a497656c730c7e5d971c812180d4b1ca5b8d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5ee6c5a4-35f2-4936-9314-9f197f4ba2d3", "node_type": "1", "metadata": {"window": ". . . . 23\n3.4 Results . . . . . ", "original_text": "23\n3.4 Results . "}, "hash": "7d055530f0d17486ad2647083ac2d639be752dab64aa5a0afbe4fbf66481e129", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5ee6c5a4-35f2-4936-9314-9f197f4ba2d3": {"__data__": {"id_": "5ee6c5a4-35f2-4936-9314-9f197f4ba2d3", "embedding": null, "metadata": {"window": ". . . . 23\n3.4 Results . . . . . ", "original_text": "23\n3.4 Results . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7e25fb1b-ec22-43e9-8955-2848f47cc53a", "node_type": "1", "metadata": {"window": ". . . . . 23\n3.4 Results . . . . ", "original_text": ". "}, "hash": "6f61a32f4b3e1d36cd0fa90370bf3e45d684dedb2720640730069fb50475573d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fd845739-d1a4-4871-88ae-b7d5cdd0764c", "node_type": "1", "metadata": {"window": ". . . 23\n3.4 Results . . . . . . ", "original_text": ". 
"}, "hash": "257002bdaea2bca3a3521e658f0a3adf80ba8ca5045abaaac82cd18960bd5319", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "23\n3.4 Results . ", "mimetype": "text/plain", "start_char_idx": 1492, "end_char_idx": 1509, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fd845739-d1a4-4871-88ae-b7d5cdd0764c": {"__data__": {"id_": "fd845739-d1a4-4871-88ae-b7d5cdd0764c", "embedding": null, "metadata": {"window": ". . . 23\n3.4 Results . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5ee6c5a4-35f2-4936-9314-9f197f4ba2d3", "node_type": "1", "metadata": {"window": ". . . . 23\n3.4 Results . . . . . ", "original_text": "23\n3.4 Results . "}, "hash": "7d055530f0d17486ad2647083ac2d639be752dab64aa5a0afbe4fbf66481e129", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "da92202f-20c9-4ce3-862f-27adfeccfafc", "node_type": "1", "metadata": {"window": ". . 23\n3.4 Results . . . . . . . ", "original_text": ". "}, "hash": "59f83e545b8ed45da3ac76e1daa72cd5a61682cacb2b7e188602d9b135d8364b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "da92202f-20c9-4ce3-862f-27adfeccfafc": {"__data__": {"id_": "da92202f-20c9-4ce3-862f-27adfeccfafc", "embedding": null, "metadata": {"window": ". . 23\n3.4 Results . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fd845739-d1a4-4871-88ae-b7d5cdd0764c", "node_type": "1", "metadata": {"window": ". . . 23\n3.4 Results . . . . . . ", "original_text": ". "}, "hash": "257002bdaea2bca3a3521e658f0a3adf80ba8ca5045abaaac82cd18960bd5319", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "07c81a94-1fcc-4c02-a931-6deffb9413b1", "node_type": "1", "metadata": {"window": ". 23\n3.4 Results . . . . . . . . ", "original_text": ". "}, "hash": "e9413569db1221627f44fcdfe117c4d267ad5901dc07185a54f3b6bd57c132ee", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "07c81a94-1fcc-4c02-a931-6deffb9413b1": {"__data__": {"id_": "07c81a94-1fcc-4c02-a931-6deffb9413b1", "embedding": null, "metadata": {"window": ". 23\n3.4 Results . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "da92202f-20c9-4ce3-862f-27adfeccfafc", "node_type": "1", "metadata": {"window": ". . 23\n3.4 Results . . . . . . . ", "original_text": ". "}, "hash": "59f83e545b8ed45da3ac76e1daa72cd5a61682cacb2b7e188602d9b135d8364b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "28e282d4-d383-4ef2-9734-e105eab0a626", "node_type": "1", "metadata": {"window": "23\n3.4 Results . . . . . . . . . ", "original_text": ". "}, "hash": "ad4c323eacf2a5f90ba3da6e58e1a84f07550f84e3702f3c2989b948151b7ffd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "28e282d4-d383-4ef2-9734-e105eab0a626": {"__data__": {"id_": "28e282d4-d383-4ef2-9734-e105eab0a626", "embedding": null, "metadata": {"window": "23\n3.4 Results . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "07c81a94-1fcc-4c02-a931-6deffb9413b1", "node_type": "1", "metadata": {"window": ". 23\n3.4 Results . . . . . . . . ", "original_text": ". "}, "hash": "e9413569db1221627f44fcdfe117c4d267ad5901dc07185a54f3b6bd57c132ee", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "355812e1-e2a6-4667-aa2e-98acdd554b75", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "355812e1-e2a6-4667-aa2e-98acdd554b75": {"__data__": {"id_": "355812e1-e2a6-4667-aa2e-98acdd554b75", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "28e282d4-d383-4ef2-9734-e105eab0a626", "node_type": "1", "metadata": {"window": "23\n3.4 Results . . . . . . . . . ", "original_text": ". "}, "hash": "ad4c323eacf2a5f90ba3da6e58e1a84f07550f84e3702f3c2989b948151b7ffd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ecc06446-ed65-4244-ae9d-7d15416170c4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ecc06446-ed65-4244-ae9d-7d15416170c4": {"__data__": {"id_": "ecc06446-ed65-4244-ae9d-7d15416170c4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "355812e1-e2a6-4667-aa2e-98acdd554b75", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "eb4bbb3a-37db-413d-b122-5ff0f92dd0f2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "eb4bbb3a-37db-413d-b122-5ff0f92dd0f2": {"__data__": {"id_": "eb4bbb3a-37db-413d-b122-5ff0f92dd0f2", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ecc06446-ed65-4244-ae9d-7d15416170c4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3ceebb5c-7a48-4865-ae1f-4aca7ffe04b0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3ceebb5c-7a48-4865-ae1f-4aca7ffe04b0": {"__data__": {"id_": "3ceebb5c-7a48-4865-ae1f-4aca7ffe04b0", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "eb4bbb3a-37db-413d-b122-5ff0f92dd0f2", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "092dab60-215c-4a6b-a318-baa4a71cb6e6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "092dab60-215c-4a6b-a318-baa4a71cb6e6": {"__data__": {"id_": "092dab60-215c-4a6b-a318-baa4a71cb6e6", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3ceebb5c-7a48-4865-ae1f-4aca7ffe04b0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8b56582f-8d09-4ab2-bcac-a56d4e598d0e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8b56582f-8d09-4ab2-bcac-a56d4e598d0e": {"__data__": {"id_": "8b56582f-8d09-4ab2-bcac-a56d4e598d0e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "092dab60-215c-4a6b-a318-baa4a71cb6e6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "668b0c4b-a595-4f8c-a94b-7cb830568d45", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "668b0c4b-a595-4f8c-a94b-7cb830568d45": {"__data__": {"id_": "668b0c4b-a595-4f8c-a94b-7cb830568d45", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8b56582f-8d09-4ab2-bcac-a56d4e598d0e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d9852ee0-ed1f-4fe0-be4e-226bfe2780b8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d9852ee0-ed1f-4fe0-be4e-226bfe2780b8": {"__data__": {"id_": "d9852ee0-ed1f-4fe0-be4e-226bfe2780b8", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "668b0c4b-a595-4f8c-a94b-7cb830568d45", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "149e944c-9220-457e-8aad-8d978e045fcb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "149e944c-9220-457e-8aad-8d978e045fcb": {"__data__": {"id_": "149e944c-9220-457e-8aad-8d978e045fcb", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d9852ee0-ed1f-4fe0-be4e-226bfe2780b8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3129721a-98df-4fed-8867-8a80ed5acdcb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3129721a-98df-4fed-8867-8a80ed5acdcb": {"__data__": {"id_": "3129721a-98df-4fed-8867-8a80ed5acdcb", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "149e944c-9220-457e-8aad-8d978e045fcb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bab2dbe0-29f6-4f3f-952c-82fd967d2183", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bab2dbe0-29f6-4f3f-952c-82fd967d2183": {"__data__": {"id_": "bab2dbe0-29f6-4f3f-952c-82fd967d2183", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3129721a-98df-4fed-8867-8a80ed5acdcb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5aa5a4c6-3c47-42ca-bbc5-9834a733c697", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5aa5a4c6-3c47-42ca-bbc5-9834a733c697": {"__data__": {"id_": "5aa5a4c6-3c47-42ca-bbc5-9834a733c697", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bab2dbe0-29f6-4f3f-952c-82fd967d2183", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a1063a3a-0623-4ffb-9966-3a2e837d464d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a1063a3a-0623-4ffb-9966-3a2e837d464d": {"__data__": {"id_": "a1063a3a-0623-4ffb-9966-3a2e837d464d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5aa5a4c6-3c47-42ca-bbc5-9834a733c697", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7da6c025-4ad1-4704-8ffd-47687a86d70c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7da6c025-4ad1-4704-8ffd-47687a86d70c": {"__data__": {"id_": "7da6c025-4ad1-4704-8ffd-47687a86d70c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a1063a3a-0623-4ffb-9966-3a2e837d464d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5d999c37-1884-43d5-98e8-af8f1cd7dc7e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5d999c37-1884-43d5-98e8-af8f1cd7dc7e": {"__data__": {"id_": "5d999c37-1884-43d5-98e8-af8f1cd7dc7e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7da6c025-4ad1-4704-8ffd-47687a86d70c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "64fa903e-9660-4635-a981-d294f06a84d5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "64fa903e-9660-4635-a981-d294f06a84d5": {"__data__": {"id_": "64fa903e-9660-4635-a981-d294f06a84d5", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5d999c37-1884-43d5-98e8-af8f1cd7dc7e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d4d84da8-a760-4268-9aef-a65ec802e334", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d4d84da8-a760-4268-9aef-a65ec802e334": {"__data__": {"id_": "d4d84da8-a760-4268-9aef-a65ec802e334", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "64fa903e-9660-4635-a981-d294f06a84d5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b07c3f13-1d2b-4d31-ae22-c49fe559beea", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b07c3f13-1d2b-4d31-ae22-c49fe559beea": {"__data__": {"id_": "b07c3f13-1d2b-4d31-ae22-c49fe559beea", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d4d84da8-a760-4268-9aef-a65ec802e334", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f7ce6b27-91aa-452e-996f-f57452dc85ab", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f7ce6b27-91aa-452e-996f-f57452dc85ab": {"__data__": {"id_": "f7ce6b27-91aa-452e-996f-f57452dc85ab", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b07c3f13-1d2b-4d31-ae22-c49fe559beea", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "17871de6-4898-4e90-91ed-8033633283e8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "17871de6-4898-4e90-91ed-8033633283e8": {"__data__": {"id_": "17871de6-4898-4e90-91ed-8033633283e8", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f7ce6b27-91aa-452e-996f-f57452dc85ab", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b349a5a0-326e-4005-b01c-255633ac3afc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b349a5a0-326e-4005-b01c-255633ac3afc": {"__data__": {"id_": "b349a5a0-326e-4005-b01c-255633ac3afc", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "17871de6-4898-4e90-91ed-8033633283e8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2930af96-d7f1-40fc-b07e-559ed43d60df", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2930af96-d7f1-40fc-b07e-559ed43d60df": {"__data__": {"id_": "2930af96-d7f1-40fc-b07e-559ed43d60df", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b349a5a0-326e-4005-b01c-255633ac3afc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6d86b3b8-ac8f-492e-8472-3fc387043a69", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6d86b3b8-ac8f-492e-8472-3fc387043a69": {"__data__": {"id_": "6d86b3b8-ac8f-492e-8472-3fc387043a69", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2930af96-d7f1-40fc-b07e-559ed43d60df", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "13b3685c-15b4-4115-b0c9-f3dcbb9508f7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "13b3685c-15b4-4115-b0c9-f3dcbb9508f7": {"__data__": {"id_": "13b3685c-15b4-4115-b0c9-f3dcbb9508f7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6d86b3b8-ac8f-492e-8472-3fc387043a69", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fe681619-8376-432a-b85e-338bca89aad7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fe681619-8376-432a-b85e-338bca89aad7": {"__data__": {"id_": "fe681619-8376-432a-b85e-338bca89aad7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "13b3685c-15b4-4115-b0c9-f3dcbb9508f7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "22aed3c5-de6d-4318-8f0b-ccc5ca4b0dd6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "22aed3c5-de6d-4318-8f0b-ccc5ca4b0dd6": {"__data__": {"id_": "22aed3c5-de6d-4318-8f0b-ccc5ca4b0dd6", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fe681619-8376-432a-b85e-338bca89aad7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bc9eed1e-c16f-40c2-8a18-4e4725dac437", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bc9eed1e-c16f-40c2-8a18-4e4725dac437": {"__data__": {"id_": "bc9eed1e-c16f-40c2-8a18-4e4725dac437", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "22aed3c5-de6d-4318-8f0b-ccc5ca4b0dd6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d31bd6a6-a254-4ae3-9f8f-5073d707bdcd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d31bd6a6-a254-4ae3-9f8f-5073d707bdcd": {"__data__": {"id_": "d31bd6a6-a254-4ae3-9f8f-5073d707bdcd", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bc9eed1e-c16f-40c2-8a18-4e4725dac437", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cf8aa5df-3520-47fd-b204-afb421cfc025", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cf8aa5df-3520-47fd-b204-afb421cfc025": {"__data__": {"id_": "cf8aa5df-3520-47fd-b204-afb421cfc025", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d31bd6a6-a254-4ae3-9f8f-5073d707bdcd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "14a35976-41ea-4c11-a2a4-43db46bf3f6a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "14a35976-41ea-4c11-a2a4-43db46bf3f6a": {"__data__": {"id_": "14a35976-41ea-4c11-a2a4-43db46bf3f6a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cf8aa5df-3520-47fd-b204-afb421cfc025", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2d0ed9d0-6464-4d90-8711-abe0602cd051", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2d0ed9d0-6464-4d90-8711-abe0602cd051": {"__data__": {"id_": "2d0ed9d0-6464-4d90-8711-abe0602cd051", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "14a35976-41ea-4c11-a2a4-43db46bf3f6a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "694a7c54-f39c-4995-9140-01b087727fea", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "694a7c54-f39c-4995-9140-01b087727fea": {"__data__": {"id_": "694a7c54-f39c-4995-9140-01b087727fea", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2d0ed9d0-6464-4d90-8711-abe0602cd051", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "18dcadee-14c1-4814-bd7b-c294c45c679f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "18dcadee-14c1-4814-bd7b-c294c45c679f": {"__data__": {"id_": "18dcadee-14c1-4814-bd7b-c294c45c679f", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "694a7c54-f39c-4995-9140-01b087727fea", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a44f06a5-9aff-4e26-8d2b-99ce774131ca", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a44f06a5-9aff-4e26-8d2b-99ce774131ca": {"__data__": {"id_": "a44f06a5-9aff-4e26-8d2b-99ce774131ca", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "18dcadee-14c1-4814-bd7b-c294c45c679f", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5d7573c8-6fde-4e3a-b49e-73d3cf1c3ee8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5d7573c8-6fde-4e3a-b49e-73d3cf1c3ee8": {"__data__": {"id_": "5d7573c8-6fde-4e3a-b49e-73d3cf1c3ee8", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a44f06a5-9aff-4e26-8d2b-99ce774131ca", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "45cac001-01a0-4da7-8f3d-60263e6f879b", "node_type": "1", "metadata": {"window": ". . . . . . . . 24\n3.4.1 Scores distributions . ", "original_text": ". "}, "hash": "9e83caf19079d7c9b4eaad193fc10307ceeb2d41fc6e1f000d1a6a3e0a43e2de", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "45cac001-01a0-4da7-8f3d-60263e6f879b": {"__data__": {"id_": "45cac001-01a0-4da7-8f3d-60263e6f879b", "embedding": null, "metadata": {"window": ". . . . . . . . 24\n3.4.1 Scores distributions . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5d7573c8-6fde-4e3a-b49e-73d3cf1c3ee8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "26da1a0d-8331-459f-91ff-1949035b46f8", "node_type": "1", "metadata": {"window": ". . . . . . . 24\n3.4.1 Scores distributions . . ", "original_text": ". "}, "hash": "820ef9d34c6170070a883a543b3aee6733dbcacadaa6274c3bf4dd90957acbed", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "26da1a0d-8331-459f-91ff-1949035b46f8": {"__data__": {"id_": "26da1a0d-8331-459f-91ff-1949035b46f8", "embedding": null, "metadata": {"window": ". . . . . . . 24\n3.4.1 Scores distributions . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "45cac001-01a0-4da7-8f3d-60263e6f879b", "node_type": "1", "metadata": {"window": ". . . . . . . . 24\n3.4.1 Scores distributions . ", "original_text": ". "}, "hash": "9e83caf19079d7c9b4eaad193fc10307ceeb2d41fc6e1f000d1a6a3e0a43e2de", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f14bc4b9-19b9-4a56-8b64-1d6bb977a0bb", "node_type": "1", "metadata": {"window": ". . . . . . 24\n3.4.1 Scores distributions . . . ", "original_text": ". "}, "hash": "9d729e267043de586aecbb5c56c3de61f8f395c3768271be2df4874275d7aca2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f14bc4b9-19b9-4a56-8b64-1d6bb977a0bb": {"__data__": {"id_": "f14bc4b9-19b9-4a56-8b64-1d6bb977a0bb", "embedding": null, "metadata": {"window": ". . . . . . 24\n3.4.1 Scores distributions . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "26da1a0d-8331-459f-91ff-1949035b46f8", "node_type": "1", "metadata": {"window": ". . . . . . . 24\n3.4.1 Scores distributions . . ", "original_text": ". "}, "hash": "820ef9d34c6170070a883a543b3aee6733dbcacadaa6274c3bf4dd90957acbed", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "eb17ffe5-d1f6-4f6b-b55d-cdc2aa66bff7", "node_type": "1", "metadata": {"window": ". . . . . 24\n3.4.1 Scores distributions . . . . 
", "original_text": ". "}, "hash": "4d2f29d6d65f3f211e8fae159387cc9d8e623089aab62124017b56a4623186b0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "eb17ffe5-d1f6-4f6b-b55d-cdc2aa66bff7": {"__data__": {"id_": "eb17ffe5-d1f6-4f6b-b55d-cdc2aa66bff7", "embedding": null, "metadata": {"window": ". . . . . 24\n3.4.1 Scores distributions . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f14bc4b9-19b9-4a56-8b64-1d6bb977a0bb", "node_type": "1", "metadata": {"window": ". . . . . . 24\n3.4.1 Scores distributions . . . ", "original_text": ". "}, "hash": "9d729e267043de586aecbb5c56c3de61f8f395c3768271be2df4874275d7aca2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "73afef95-0f70-4310-a611-ed3a2059ce0b", "node_type": "1", "metadata": {"window": ". . . . 24\n3.4.1 Scores distributions . . . . . ", "original_text": "24\n3.4.1 Scores distributions . "}, "hash": "472098ca692e3feb03319da8c8abdc1d1fd92e3d711284ade6552dfbe20e29b5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "73afef95-0f70-4310-a611-ed3a2059ce0b": {"__data__": {"id_": "73afef95-0f70-4310-a611-ed3a2059ce0b", "embedding": null, "metadata": {"window": ". . . . 24\n3.4.1 Scores distributions . . . . . ", "original_text": "24\n3.4.1 Scores distributions . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "eb17ffe5-d1f6-4f6b-b55d-cdc2aa66bff7", "node_type": "1", "metadata": {"window": ". . . . . 24\n3.4.1 Scores distributions . . . . ", "original_text": ". "}, "hash": "4d2f29d6d65f3f211e8fae159387cc9d8e623089aab62124017b56a4623186b0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1323a387-36d6-438f-b7d0-dfe2a2deed7d", "node_type": "1", "metadata": {"window": ". . . 24\n3.4.1 Scores distributions . . . . . . ", "original_text": ". "}, "hash": "f6f4436c431f063b7eb06a72bc8a0200c909c2c1a555994efb5bf1beb360240d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "24\n3.4.1 Scores distributions . ", "mimetype": "text/plain", "start_char_idx": 1595, "end_char_idx": 1627, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1323a387-36d6-438f-b7d0-dfe2a2deed7d": {"__data__": {"id_": "1323a387-36d6-438f-b7d0-dfe2a2deed7d", "embedding": null, "metadata": {"window": ". . . 24\n3.4.1 Scores distributions . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "73afef95-0f70-4310-a611-ed3a2059ce0b", "node_type": "1", "metadata": {"window": ". . . . 24\n3.4.1 Scores distributions . . . . . ", "original_text": "24\n3.4.1 Scores distributions . "}, "hash": "472098ca692e3feb03319da8c8abdc1d1fd92e3d711284ade6552dfbe20e29b5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1f54059a-c723-4c42-96c3-2524ce7e09d0", "node_type": "1", "metadata": {"window": ". . 24\n3.4.1 Scores distributions . . . . . . . ", "original_text": ". "}, "hash": "3ab99f2afa3d9328d579e2ccb952fca33c77c721fd11d83455d863dcaf88dd42", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1f54059a-c723-4c42-96c3-2524ce7e09d0": {"__data__": {"id_": "1f54059a-c723-4c42-96c3-2524ce7e09d0", "embedding": null, "metadata": {"window": ". . 24\n3.4.1 Scores distributions . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1323a387-36d6-438f-b7d0-dfe2a2deed7d", "node_type": "1", "metadata": {"window": ". . . 24\n3.4.1 Scores distributions . . . . . . ", "original_text": ". "}, "hash": "f6f4436c431f063b7eb06a72bc8a0200c909c2c1a555994efb5bf1beb360240d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a1a1c3af-82f5-41bb-b6cd-d37020208ba1", "node_type": "1", "metadata": {"window": ". 24\n3.4.1 Scores distributions . . . . . . . . ", "original_text": ". "}, "hash": "c6df5c0e536823d56c90bb65fd13b4a1f9477f394762efc085b7c2584347c407", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a1a1c3af-82f5-41bb-b6cd-d37020208ba1": {"__data__": {"id_": "a1a1c3af-82f5-41bb-b6cd-d37020208ba1", "embedding": null, "metadata": {"window": ". 24\n3.4.1 Scores distributions . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1f54059a-c723-4c42-96c3-2524ce7e09d0", "node_type": "1", "metadata": {"window": ". . 24\n3.4.1 Scores distributions . . . . . . . ", "original_text": ". 
"}, "hash": "3ab99f2afa3d9328d579e2ccb952fca33c77c721fd11d83455d863dcaf88dd42", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "738e008e-920d-4eda-8be5-51b0e3023658", "node_type": "1", "metadata": {"window": "24\n3.4.1 Scores distributions . . . . . . . . . ", "original_text": ". "}, "hash": "76c2aa6dd0055fb1a973d716de8d643396c031bb13fc84086d737e9c25597745", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "738e008e-920d-4eda-8be5-51b0e3023658": {"__data__": {"id_": "738e008e-920d-4eda-8be5-51b0e3023658", "embedding": null, "metadata": {"window": "24\n3.4.1 Scores distributions . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a1a1c3af-82f5-41bb-b6cd-d37020208ba1", "node_type": "1", "metadata": {"window": ". 24\n3.4.1 Scores distributions . . . . . . . . ", "original_text": ". "}, "hash": "c6df5c0e536823d56c90bb65fd13b4a1f9477f394762efc085b7c2584347c407", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a518875e-e461-4b06-87cf-29b517f4324c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a518875e-e461-4b06-87cf-29b517f4324c": {"__data__": {"id_": "a518875e-e461-4b06-87cf-29b517f4324c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "738e008e-920d-4eda-8be5-51b0e3023658", "node_type": "1", "metadata": {"window": "24\n3.4.1 Scores distributions . . . . . . . . . ", "original_text": ". "}, "hash": "76c2aa6dd0055fb1a973d716de8d643396c031bb13fc84086d737e9c25597745", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e2edc79f-58f2-4eb9-b2ce-a68cd8131bdd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e2edc79f-58f2-4eb9-b2ce-a68cd8131bdd": {"__data__": {"id_": "e2edc79f-58f2-4eb9-b2ce-a68cd8131bdd", "embedding": null, "metadata": {"window": ". . . . . . . . . 
", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a518875e-e461-4b06-87cf-29b517f4324c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "240e6559-3551-480f-845f-c231d7e880c3", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "240e6559-3551-480f-845f-c231d7e880c3": {"__data__": {"id_": "240e6559-3551-480f-845f-c231d7e880c3", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e2edc79f-58f2-4eb9-b2ce-a68cd8131bdd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "71d65038-5759-4e21-a35a-5812d4c33515", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "71d65038-5759-4e21-a35a-5812d4c33515": {"__data__": {"id_": "71d65038-5759-4e21-a35a-5812d4c33515", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "240e6559-3551-480f-845f-c231d7e880c3", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ea9c5dfd-0ea6-415e-9b7c-9dcab087c833", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ea9c5dfd-0ea6-415e-9b7c-9dcab087c833": {"__data__": {"id_": "ea9c5dfd-0ea6-415e-9b7c-9dcab087c833", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "71d65038-5759-4e21-a35a-5812d4c33515", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b7a40572-a6c4-4da5-9588-f6084f3300a1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b7a40572-a6c4-4da5-9588-f6084f3300a1": {"__data__": {"id_": "b7a40572-a6c4-4da5-9588-f6084f3300a1", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ea9c5dfd-0ea6-415e-9b7c-9dcab087c833", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a8b55d47-4aa2-4b87-a158-ee5cb59645d0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a8b55d47-4aa2-4b87-a158-ee5cb59645d0": {"__data__": {"id_": "a8b55d47-4aa2-4b87-a158-ee5cb59645d0", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b7a40572-a6c4-4da5-9588-f6084f3300a1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "705a606f-907e-4ef8-98d2-6c2e0131b8af", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "705a606f-907e-4ef8-98d2-6c2e0131b8af": {"__data__": {"id_": "705a606f-907e-4ef8-98d2-6c2e0131b8af", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a8b55d47-4aa2-4b87-a158-ee5cb59645d0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "faabdd7c-f5ec-4d94-ace5-edaef19fdb6c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "faabdd7c-f5ec-4d94-ace5-edaef19fdb6c": {"__data__": {"id_": "faabdd7c-f5ec-4d94-ace5-edaef19fdb6c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "705a606f-907e-4ef8-98d2-6c2e0131b8af", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0889bdef-1ead-4d33-9a3b-1e6c833481ec", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0889bdef-1ead-4d33-9a3b-1e6c833481ec": {"__data__": {"id_": "0889bdef-1ead-4d33-9a3b-1e6c833481ec", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "faabdd7c-f5ec-4d94-ace5-edaef19fdb6c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8c20904a-ed17-475d-a8bb-2c30ba333adc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8c20904a-ed17-475d-a8bb-2c30ba333adc": {"__data__": {"id_": "8c20904a-ed17-475d-a8bb-2c30ba333adc", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0889bdef-1ead-4d33-9a3b-1e6c833481ec", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "498efbbd-a7a3-4878-b748-fd8a566347f4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "498efbbd-a7a3-4878-b748-fd8a566347f4": {"__data__": {"id_": "498efbbd-a7a3-4878-b748-fd8a566347f4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8c20904a-ed17-475d-a8bb-2c30ba333adc", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5045030b-9dbc-43f0-9e82-2cb8315425d9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5045030b-9dbc-43f0-9e82-2cb8315425d9": {"__data__": {"id_": "5045030b-9dbc-43f0-9e82-2cb8315425d9", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "498efbbd-a7a3-4878-b748-fd8a566347f4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "92a4d7fc-1742-4762-b5cd-306554f29e1b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "92a4d7fc-1742-4762-b5cd-306554f29e1b": {"__data__": {"id_": "92a4d7fc-1742-4762-b5cd-306554f29e1b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5045030b-9dbc-43f0-9e82-2cb8315425d9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1f3f0c93-7c01-40f0-a8e3-4837856ebdcd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1f3f0c93-7c01-40f0-a8e3-4837856ebdcd": {"__data__": {"id_": "1f3f0c93-7c01-40f0-a8e3-4837856ebdcd", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "92a4d7fc-1742-4762-b5cd-306554f29e1b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e02b1a27-c64a-4020-8a40-e3f42e544108", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e02b1a27-c64a-4020-8a40-e3f42e544108": {"__data__": {"id_": "e02b1a27-c64a-4020-8a40-e3f42e544108", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1f3f0c93-7c01-40f0-a8e3-4837856ebdcd", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "376e6950-0ba6-4126-90f6-0efeea3846b7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "376e6950-0ba6-4126-90f6-0efeea3846b7": {"__data__": {"id_": "376e6950-0ba6-4126-90f6-0efeea3846b7", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e02b1a27-c64a-4020-8a40-e3f42e544108", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1a21dd03-afd3-478f-9fed-ec84bea64f17", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1a21dd03-afd3-478f-9fed-ec84bea64f17": {"__data__": {"id_": "1a21dd03-afd3-478f-9fed-ec84bea64f17", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "376e6950-0ba6-4126-90f6-0efeea3846b7", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8bcfe243-3553-417f-ad9c-db3ff9c79752", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8bcfe243-3553-417f-ad9c-db3ff9c79752": {"__data__": {"id_": "8bcfe243-3553-417f-ad9c-db3ff9c79752", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1a21dd03-afd3-478f-9fed-ec84bea64f17", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "40ea12f3-392a-4db0-9196-31f2e75ac941", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "40ea12f3-392a-4db0-9196-31f2e75ac941": {"__data__": {"id_": "40ea12f3-392a-4db0-9196-31f2e75ac941", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8bcfe243-3553-417f-ad9c-db3ff9c79752", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "611c51f7-fdf7-4315-95da-5695bd1832c0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "611c51f7-fdf7-4315-95da-5695bd1832c0": {"__data__": {"id_": "611c51f7-fdf7-4315-95da-5695bd1832c0", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "40ea12f3-392a-4db0-9196-31f2e75ac941", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "95c07636-ecaf-4d9c-8814-7665fd813129", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "95c07636-ecaf-4d9c-8814-7665fd813129": {"__data__": {"id_": "95c07636-ecaf-4d9c-8814-7665fd813129", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "611c51f7-fdf7-4315-95da-5695bd1832c0", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "91038a72-7d16-4de9-a5cd-c188a75ac4c1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "91038a72-7d16-4de9-a5cd-c188a75ac4c1": {"__data__": {"id_": "91038a72-7d16-4de9-a5cd-c188a75ac4c1", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "95c07636-ecaf-4d9c-8814-7665fd813129", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2c5c9baa-8321-46ed-8347-753933ecc87e", "node_type": "1", "metadata": {"window": ". . . . . . . . 24\n3.4.2 Correlation between scores . ", "original_text": ". "}, "hash": "ed4f30f758570bf0cf3174da054631cef3cf48772b574fe8d0eedfc553e4ba93", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2c5c9baa-8321-46ed-8347-753933ecc87e": {"__data__": {"id_": "2c5c9baa-8321-46ed-8347-753933ecc87e", "embedding": null, "metadata": {"window": ". . . . . . . . 24\n3.4.2 Correlation between scores . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "91038a72-7d16-4de9-a5cd-c188a75ac4c1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6d40ad33-f7aa-43a8-a87c-360b9fb55fbb", "node_type": "1", "metadata": {"window": ". . . . . . . 24\n3.4.2 Correlation between scores . . ", "original_text": ". "}, "hash": "422d05881417848eef727f4e6a7965afe9956d271b479388507eb890d9e68007", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6d40ad33-f7aa-43a8-a87c-360b9fb55fbb": {"__data__": {"id_": "6d40ad33-f7aa-43a8-a87c-360b9fb55fbb", "embedding": null, "metadata": {"window": ". . . . . . . 24\n3.4.2 Correlation between scores . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2c5c9baa-8321-46ed-8347-753933ecc87e", "node_type": "1", "metadata": {"window": ". . . . . . . . 24\n3.4.2 Correlation between scores . ", "original_text": ". "}, "hash": "ed4f30f758570bf0cf3174da054631cef3cf48772b574fe8d0eedfc553e4ba93", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "74773f3d-aebc-40f7-9188-3b1aae82615e", "node_type": "1", "metadata": {"window": ". . . . . . 24\n3.4.2 Correlation between scores . . . 
", "original_text": ". "}, "hash": "96c18c377cf0e980ef5605426fc7fa524443e072dd23bb8288c77c41b0f59267", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "74773f3d-aebc-40f7-9188-3b1aae82615e": {"__data__": {"id_": "74773f3d-aebc-40f7-9188-3b1aae82615e", "embedding": null, "metadata": {"window": ". . . . . . 24\n3.4.2 Correlation between scores . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6d40ad33-f7aa-43a8-a87c-360b9fb55fbb", "node_type": "1", "metadata": {"window": ". . . . . . . 24\n3.4.2 Correlation between scores . . ", "original_text": ". "}, "hash": "422d05881417848eef727f4e6a7965afe9956d271b479388507eb890d9e68007", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e6f4ab6f-6382-4a68-9385-24c615625fa7", "node_type": "1", "metadata": {"window": ". . . . . 24\n3.4.2 Correlation between scores . . . . ", "original_text": ". "}, "hash": "6e0717bbbad415bd35daef0217ccda05fbf7b8cbe645e91fa1147a66c6d7b044", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e6f4ab6f-6382-4a68-9385-24c615625fa7": {"__data__": {"id_": "e6f4ab6f-6382-4a68-9385-24c615625fa7", "embedding": null, "metadata": {"window": ". . . . . 24\n3.4.2 Correlation between scores . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "74773f3d-aebc-40f7-9188-3b1aae82615e", "node_type": "1", "metadata": {"window": ". . . . . . 24\n3.4.2 Correlation between scores . . . ", "original_text": ". "}, "hash": "96c18c377cf0e980ef5605426fc7fa524443e072dd23bb8288c77c41b0f59267", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ef709998-c023-4d7e-9fbb-64bbc42a62b5", "node_type": "1", "metadata": {"window": ". . . . 24\n3.4.2 Correlation between scores . . . . . ", "original_text": "24\n3.4.2 Correlation between scores . "}, "hash": "d9f92286fb621058b95a4958ccfba1e7c711315ba2e618ed495d07d9205d9b15", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ef709998-c023-4d7e-9fbb-64bbc42a62b5": {"__data__": {"id_": "ef709998-c023-4d7e-9fbb-64bbc42a62b5", "embedding": null, "metadata": {"window": ". . . . 24\n3.4.2 Correlation between scores . . . . . ", "original_text": "24\n3.4.2 Correlation between scores . 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e6f4ab6f-6382-4a68-9385-24c615625fa7", "node_type": "1", "metadata": {"window": ". . . . . 24\n3.4.2 Correlation between scores . . . . ", "original_text": ". "}, "hash": "6e0717bbbad415bd35daef0217ccda05fbf7b8cbe645e91fa1147a66c6d7b044", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "42547948-7501-491a-9364-bbb7a01632d7", "node_type": "1", "metadata": {"window": ". . . 24\n3.4.2 Correlation between scores . . . . . . ", "original_text": ". "}, "hash": "abfcc1d4d6a1755ee2ee631ea28f943f760f3223abd8dd7f8b5db853d4e82696", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "24\n3.4.2 Correlation between scores . ", "mimetype": "text/plain", "start_char_idx": 1689, "end_char_idx": 1727, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "42547948-7501-491a-9364-bbb7a01632d7": {"__data__": {"id_": "42547948-7501-491a-9364-bbb7a01632d7", "embedding": null, "metadata": {"window": ". . . 24\n3.4.2 Correlation between scores . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ef709998-c023-4d7e-9fbb-64bbc42a62b5", "node_type": "1", "metadata": {"window": ". . . . 24\n3.4.2 Correlation between scores . . . . . ", "original_text": "24\n3.4.2 Correlation between scores . "}, "hash": "d9f92286fb621058b95a4958ccfba1e7c711315ba2e618ed495d07d9205d9b15", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c0a9226f-82c2-457b-bb4d-7dbdc124e13a", "node_type": "1", "metadata": {"window": ". . 24\n3.4.2 Correlation between scores . . . . . . . ", "original_text": ". "}, "hash": "ce5260a19dedf8771282968f8680482a445371b78746ac6307a9618c2be4fc5d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c0a9226f-82c2-457b-bb4d-7dbdc124e13a": {"__data__": {"id_": "c0a9226f-82c2-457b-bb4d-7dbdc124e13a", "embedding": null, "metadata": {"window": ". . 24\n3.4.2 Correlation between scores . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "42547948-7501-491a-9364-bbb7a01632d7", "node_type": "1", "metadata": {"window": ". . . 24\n3.4.2 Correlation between scores . . . . . . ", "original_text": ". 
"}, "hash": "abfcc1d4d6a1755ee2ee631ea28f943f760f3223abd8dd7f8b5db853d4e82696", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "51596bd6-9f3b-4986-ae08-0f2ba67eb203", "node_type": "1", "metadata": {"window": ". 24\n3.4.2 Correlation between scores . . . . . . . . ", "original_text": ". "}, "hash": "f58dc24c2632684e912df904bf1d8a143192c627e444b639b47fb54670ec37d1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "51596bd6-9f3b-4986-ae08-0f2ba67eb203": {"__data__": {"id_": "51596bd6-9f3b-4986-ae08-0f2ba67eb203", "embedding": null, "metadata": {"window": ". 24\n3.4.2 Correlation between scores . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c0a9226f-82c2-457b-bb4d-7dbdc124e13a", "node_type": "1", "metadata": {"window": ". . 24\n3.4.2 Correlation between scores . . . . . . . ", "original_text": ". "}, "hash": "ce5260a19dedf8771282968f8680482a445371b78746ac6307a9618c2be4fc5d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2e5d58b5-ca35-485b-969f-f73f725495ac", "node_type": "1", "metadata": {"window": "24\n3.4.2 Correlation between scores . . . . . . . . . ", "original_text": ". "}, "hash": "094d15d9d4323c058dcfed0a3fc1c8e0ff9301b3e46119f29c5fe3b018888c24", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2e5d58b5-ca35-485b-969f-f73f725495ac": {"__data__": {"id_": "2e5d58b5-ca35-485b-969f-f73f725495ac", "embedding": null, "metadata": {"window": "24\n3.4.2 Correlation between scores . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "51596bd6-9f3b-4986-ae08-0f2ba67eb203", "node_type": "1", "metadata": {"window": ". 24\n3.4.2 Correlation between scores . . . . . . . . ", "original_text": ". "}, "hash": "f58dc24c2632684e912df904bf1d8a143192c627e444b639b47fb54670ec37d1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9c7fb20d-7ece-4f12-8497-7c61878d52e6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9c7fb20d-7ece-4f12-8497-7c61878d52e6": {"__data__": {"id_": "9c7fb20d-7ece-4f12-8497-7c61878d52e6", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2e5d58b5-ca35-485b-969f-f73f725495ac", "node_type": "1", "metadata": {"window": "24\n3.4.2 Correlation between scores . . . . . . . . . ", "original_text": ". "}, "hash": "094d15d9d4323c058dcfed0a3fc1c8e0ff9301b3e46119f29c5fe3b018888c24", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ce8eafae-149d-4fed-a046-d986dd294b08", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ce8eafae-149d-4fed-a046-d986dd294b08": {"__data__": {"id_": "ce8eafae-149d-4fed-a046-d986dd294b08", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9c7fb20d-7ece-4f12-8497-7c61878d52e6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "293bc8d5-1d1d-45ad-8b36-ad037f8a5dc1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "293bc8d5-1d1d-45ad-8b36-ad037f8a5dc1": {"__data__": {"id_": "293bc8d5-1d1d-45ad-8b36-ad037f8a5dc1", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ce8eafae-149d-4fed-a046-d986dd294b08", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "aa44127a-443c-4bb2-aa8d-9456a87183f8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "aa44127a-443c-4bb2-aa8d-9456a87183f8": {"__data__": {"id_": "aa44127a-443c-4bb2-aa8d-9456a87183f8", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "293bc8d5-1d1d-45ad-8b36-ad037f8a5dc1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dff9eb92-be49-4a38-a041-75be8d101e4a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dff9eb92-be49-4a38-a041-75be8d101e4a": {"__data__": {"id_": "dff9eb92-be49-4a38-a041-75be8d101e4a", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "aa44127a-443c-4bb2-aa8d-9456a87183f8", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7972a5ce-1d08-422b-83d3-3defc34635c4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7972a5ce-1d08-422b-83d3-3defc34635c4": {"__data__": {"id_": "7972a5ce-1d08-422b-83d3-3defc34635c4", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dff9eb92-be49-4a38-a041-75be8d101e4a", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5cd26bac-e963-44d8-b266-0ecd93c8f5f9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5cd26bac-e963-44d8-b266-0ecd93c8f5f9": {"__data__": {"id_": "5cd26bac-e963-44d8-b266-0ecd93c8f5f9", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7972a5ce-1d08-422b-83d3-3defc34635c4", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "85726e01-c8bb-4b72-8133-f4322bb100b9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "85726e01-c8bb-4b72-8133-f4322bb100b9": {"__data__": {"id_": "85726e01-c8bb-4b72-8133-f4322bb100b9", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5cd26bac-e963-44d8-b266-0ecd93c8f5f9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "92a0cb7e-7781-4f99-a9da-6e7d7234683c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "92a0cb7e-7781-4f99-a9da-6e7d7234683c": {"__data__": {"id_": "92a0cb7e-7781-4f99-a9da-6e7d7234683c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "85726e01-c8bb-4b72-8133-f4322bb100b9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2fe10dfc-4edc-445e-8275-a0d4fd0b3e5b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2fe10dfc-4edc-445e-8275-a0d4fd0b3e5b": {"__data__": {"id_": "2fe10dfc-4edc-445e-8275-a0d4fd0b3e5b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "92a0cb7e-7781-4f99-a9da-6e7d7234683c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "07fcf5b7-3a1b-43e7-9372-ff3fd25691d5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "07fcf5b7-3a1b-43e7-9372-ff3fd25691d5": {"__data__": {"id_": "07fcf5b7-3a1b-43e7-9372-ff3fd25691d5", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2fe10dfc-4edc-445e-8275-a0d4fd0b3e5b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4a35bf98-122e-4660-b913-7811a812da0b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4a35bf98-122e-4660-b913-7811a812da0b": {"__data__": {"id_": "4a35bf98-122e-4660-b913-7811a812da0b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "07fcf5b7-3a1b-43e7-9372-ff3fd25691d5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b3545d8a-1238-41bd-85c5-2b52f6a2638d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b3545d8a-1238-41bd-85c5-2b52f6a2638d": {"__data__": {"id_": "b3545d8a-1238-41bd-85c5-2b52f6a2638d", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4a35bf98-122e-4660-b913-7811a812da0b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "37e1c58c-75b5-41e5-9d8f-25053e7a7836", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "37e1c58c-75b5-41e5-9d8f-25053e7a7836": {"__data__": {"id_": "37e1c58c-75b5-41e5-9d8f-25053e7a7836", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b3545d8a-1238-41bd-85c5-2b52f6a2638d", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0ac5ead1-0b00-49f2-9863-50354bdcd934", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0ac5ead1-0b00-49f2-9863-50354bdcd934": {"__data__": {"id_": "0ac5ead1-0b00-49f2-9863-50354bdcd934", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "37e1c58c-75b5-41e5-9d8f-25053e7a7836", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "49603d20-a3ec-4981-b1e5-fcd5466fd31b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "49603d20-a3ec-4981-b1e5-fcd5466fd31b": {"__data__": {"id_": "49603d20-a3ec-4981-b1e5-fcd5466fd31b", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0ac5ead1-0b00-49f2-9863-50354bdcd934", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4c02a0d0-9eb2-419d-820e-1258c99ae8fb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4c02a0d0-9eb2-419d-820e-1258c99ae8fb": {"__data__": {"id_": "4c02a0d0-9eb2-419d-820e-1258c99ae8fb", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "49603d20-a3ec-4981-b1e5-fcd5466fd31b", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "61344edf-cb22-489d-8d2a-c4d71df6be50", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "61344edf-cb22-489d-8d2a-c4d71df6be50": {"__data__": {"id_": "61344edf-cb22-489d-8d2a-c4d71df6be50", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4c02a0d0-9eb2-419d-820e-1258c99ae8fb", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c7d58f76-0d5a-41f3-b0c7-dae162d95354", "node_type": "1", "metadata": {"window": ". . . . . . . . 25\n3.4.3 Visualization of ranked images . ", "original_text": ". "}, "hash": "1ab29086e98edf25696ef8fe0900e98f490c47e547b151fe3e59732a92f7b9c9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c7d58f76-0d5a-41f3-b0c7-dae162d95354": {"__data__": {"id_": "c7d58f76-0d5a-41f3-b0c7-dae162d95354", "embedding": null, "metadata": {"window": ". . . . . . . . 25\n3.4.3 Visualization of ranked images . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "61344edf-cb22-489d-8d2a-c4d71df6be50", "node_type": "1", "metadata": {"window": ". . . . . . . . . 
", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c71e28b5-35bc-4174-99c1-b62a8933aab0", "node_type": "1", "metadata": {"window": ". . . . . . . 25\n3.4.3 Visualization of ranked images . . ", "original_text": ". "}, "hash": "6dbc810bf9669ac35870478e6ce5b3f80edd2cdbac720adbb37921e13ffae287", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c71e28b5-35bc-4174-99c1-b62a8933aab0": {"__data__": {"id_": "c71e28b5-35bc-4174-99c1-b62a8933aab0", "embedding": null, "metadata": {"window": ". . . . . . . 25\n3.4.3 Visualization of ranked images . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c7d58f76-0d5a-41f3-b0c7-dae162d95354", "node_type": "1", "metadata": {"window": ". . . . . . . . 25\n3.4.3 Visualization of ranked images . ", "original_text": ". "}, "hash": "1ab29086e98edf25696ef8fe0900e98f490c47e547b151fe3e59732a92f7b9c9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "eaef2d03-3d2e-4eb7-91e6-2a44756008fb", "node_type": "1", "metadata": {"window": ". . . . . . 25\n3.4.3 Visualization of ranked images . . . ", "original_text": ". "}, "hash": "aeecf3c4886b7730db7b3f6a2f80c21f854c551603243528b89cd09a0c628f68", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "eaef2d03-3d2e-4eb7-91e6-2a44756008fb": {"__data__": {"id_": "eaef2d03-3d2e-4eb7-91e6-2a44756008fb", "embedding": null, "metadata": {"window": ". . . . . . 25\n3.4.3 Visualization of ranked images . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c71e28b5-35bc-4174-99c1-b62a8933aab0", "node_type": "1", "metadata": {"window": ". . . . . . . 25\n3.4.3 Visualization of ranked images . . ", "original_text": ". "}, "hash": "6dbc810bf9669ac35870478e6ce5b3f80edd2cdbac720adbb37921e13ffae287", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b85294bb-faf3-4ed9-bd44-14ecf3da9d3c", "node_type": "1", "metadata": {"window": ". . . . . 25\n3.4.3 Visualization of ranked images . . . . ", "original_text": ". "}, "hash": "c16b8fc5c9deb6455f34bddba0ef656e70408a5ab0f15d10cc407603c2405980", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b85294bb-faf3-4ed9-bd44-14ecf3da9d3c": {"__data__": {"id_": "b85294bb-faf3-4ed9-bd44-14ecf3da9d3c", "embedding": null, "metadata": {"window": ". . . . . 25\n3.4.3 Visualization of ranked images . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "eaef2d03-3d2e-4eb7-91e6-2a44756008fb", "node_type": "1", "metadata": {"window": ". . . . . . 25\n3.4.3 Visualization of ranked images . . . ", "original_text": ". "}, "hash": "aeecf3c4886b7730db7b3f6a2f80c21f854c551603243528b89cd09a0c628f68", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2c69b642-ea21-4cf8-8d2a-39d6a7bbc361", "node_type": "1", "metadata": {"window": ". . . . 25\n3.4.3 Visualization of ranked images . . . . . ", "original_text": "25\n3.4.3 Visualization of ranked images . "}, "hash": "a160490c2051972d5c9267b0a245eaa3362d56ef89bf39f482acfba50567ddf9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2c69b642-ea21-4cf8-8d2a-39d6a7bbc361": {"__data__": {"id_": "2c69b642-ea21-4cf8-8d2a-39d6a7bbc361", "embedding": null, "metadata": {"window": ". . . . 25\n3.4.3 Visualization of ranked images . . . . . ", "original_text": "25\n3.4.3 Visualization of ranked images . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b85294bb-faf3-4ed9-bd44-14ecf3da9d3c", "node_type": "1", "metadata": {"window": ". . . . . 25\n3.4.3 Visualization of ranked images . . . . ", "original_text": ". "}, "hash": "c16b8fc5c9deb6455f34bddba0ef656e70408a5ab0f15d10cc407603c2405980", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b5b12580-b1f6-44a9-a84b-8c0cb919481a", "node_type": "1", "metadata": {"window": ". . . 25\n3.4.3 Visualization of ranked images . . . . . . ", "original_text": ". "}, "hash": "e4c5edebaff10a32c19143ad17a9ae07a20899d82c689053a4ca66f7e9b675d4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "25\n3.4.3 Visualization of ranked images . ", "mimetype": "text/plain", "start_char_idx": 1779, "end_char_idx": 1821, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b5b12580-b1f6-44a9-a84b-8c0cb919481a": {"__data__": {"id_": "b5b12580-b1f6-44a9-a84b-8c0cb919481a", "embedding": null, "metadata": {"window": ". . . 25\n3.4.3 Visualization of ranked images . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2c69b642-ea21-4cf8-8d2a-39d6a7bbc361", "node_type": "1", "metadata": {"window": ". . . . 25\n3.4.3 Visualization of ranked images . . . . . ", "original_text": "25\n3.4.3 Visualization of ranked images . "}, "hash": "a160490c2051972d5c9267b0a245eaa3362d56ef89bf39f482acfba50567ddf9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "32777b9b-e11c-46be-a408-4d6931b3ec90", "node_type": "1", "metadata": {"window": ". . 25\n3.4.3 Visualization of ranked images . . . . . . . ", "original_text": ". "}, "hash": "a7ef97388b9528119ff780a8989b32475fd8cef6b4a051414bf62c98c0976ea9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "32777b9b-e11c-46be-a408-4d6931b3ec90": {"__data__": {"id_": "32777b9b-e11c-46be-a408-4d6931b3ec90", "embedding": null, "metadata": {"window": ". . 25\n3.4.3 Visualization of ranked images . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b5b12580-b1f6-44a9-a84b-8c0cb919481a", "node_type": "1", "metadata": {"window": ". . . 25\n3.4.3 Visualization of ranked images . . . . . . ", "original_text": ". "}, "hash": "e4c5edebaff10a32c19143ad17a9ae07a20899d82c689053a4ca66f7e9b675d4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b16982ba-8d3b-44ab-abf9-80e7bfa2336f", "node_type": "1", "metadata": {"window": ". 25\n3.4.3 Visualization of ranked images . . . . . . . . ", "original_text": ". "}, "hash": "baf0e2cbc3e38c26b35fde6b88fdb832c1344ca2ed1c9cb2151f4df0d4684cf4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b16982ba-8d3b-44ab-abf9-80e7bfa2336f": {"__data__": {"id_": "b16982ba-8d3b-44ab-abf9-80e7bfa2336f", "embedding": null, "metadata": {"window": ". 25\n3.4.3 Visualization of ranked images . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "32777b9b-e11c-46be-a408-4d6931b3ec90", "node_type": "1", "metadata": {"window": ". . 25\n3.4.3 Visualization of ranked images . . . . . . . ", "original_text": ". 
"}, "hash": "a7ef97388b9528119ff780a8989b32475fd8cef6b4a051414bf62c98c0976ea9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1deec3be-1a85-413b-8f19-5855ace959f1", "node_type": "1", "metadata": {"window": "25\n3.4.3 Visualization of ranked images . . . . . . . . . ", "original_text": ". "}, "hash": "bf43ebb6e53069d70db763eac3cc6f4b6af727b1b7b225d27d0c996c052cd2d2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1deec3be-1a85-413b-8f19-5855ace959f1": {"__data__": {"id_": "1deec3be-1a85-413b-8f19-5855ace959f1", "embedding": null, "metadata": {"window": "25\n3.4.3 Visualization of ranked images . . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b16982ba-8d3b-44ab-abf9-80e7bfa2336f", "node_type": "1", "metadata": {"window": ". 25\n3.4.3 Visualization of ranked images . . . . . . . . ", "original_text": ". "}, "hash": "baf0e2cbc3e38c26b35fde6b88fdb832c1344ca2ed1c9cb2151f4df0d4684cf4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c3b8f589-e6a9-4d30-9336-5dc750073020", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c3b8f589-e6a9-4d30-9336-5dc750073020": {"__data__": {"id_": "c3b8f589-e6a9-4d30-9336-5dc750073020", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1deec3be-1a85-413b-8f19-5855ace959f1", "node_type": "1", "metadata": {"window": "25\n3.4.3 Visualization of ranked images . . . . . . . . . ", "original_text": ". "}, "hash": "bf43ebb6e53069d70db763eac3cc6f4b6af727b1b7b225d27d0c996c052cd2d2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1c7917b6-836b-45e8-b13e-bb0b75865073", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1c7917b6-836b-45e8-b13e-bb0b75865073": {"__data__": {"id_": "1c7917b6-836b-45e8-b13e-bb0b75865073", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c3b8f589-e6a9-4d30-9336-5dc750073020", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "95c4701b-a448-427c-8b5c-fa0ef38e5aa1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "95c4701b-a448-427c-8b5c-fa0ef38e5aa1": {"__data__": {"id_": "95c4701b-a448-427c-8b5c-fa0ef38e5aa1", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1c7917b6-836b-45e8-b13e-bb0b75865073", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5dc6a3c7-c6bf-4a08-ac84-36df7ffe83d1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5dc6a3c7-c6bf-4a08-ac84-36df7ffe83d1": {"__data__": {"id_": "5dc6a3c7-c6bf-4a08-ac84-36df7ffe83d1", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "95c4701b-a448-427c-8b5c-fa0ef38e5aa1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1acacba0-04b5-42dd-8988-0f8922a683e9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1acacba0-04b5-42dd-8988-0f8922a683e9": {"__data__": {"id_": "1acacba0-04b5-42dd-8988-0f8922a683e9", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5dc6a3c7-c6bf-4a08-ac84-36df7ffe83d1", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "920c5b5c-04fe-45b8-a89e-729379fbf001", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "920c5b5c-04fe-45b8-a89e-729379fbf001": {"__data__": {"id_": "920c5b5c-04fe-45b8-a89e-729379fbf001", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1acacba0-04b5-42dd-8988-0f8922a683e9", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1096dc7c-2770-4004-8e7b-62263bad0667", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1096dc7c-2770-4004-8e7b-62263bad0667": {"__data__": {"id_": "1096dc7c-2770-4004-8e7b-62263bad0667", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "920c5b5c-04fe-45b8-a89e-729379fbf001", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5b8c28f1-7f40-4db7-88f3-afee7af17880", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5b8c28f1-7f40-4db7-88f3-afee7af17880": {"__data__": {"id_": "5b8c28f1-7f40-4db7-88f3-afee7af17880", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1096dc7c-2770-4004-8e7b-62263bad0667", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d9b99f43-2498-483f-b04e-bed888bd8796", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d9b99f43-2498-483f-b04e-bed888bd8796": {"__data__": {"id_": "d9b99f43-2498-483f-b04e-bed888bd8796", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5b8c28f1-7f40-4db7-88f3-afee7af17880", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6c03beec-36cf-4c0b-b997-7e4b6bab09cf", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6c03beec-36cf-4c0b-b997-7e4b6bab09cf": {"__data__": {"id_": "6c03beec-36cf-4c0b-b997-7e4b6bab09cf", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d9b99f43-2498-483f-b04e-bed888bd8796", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b22cfe5a-b9a3-4e71-a552-7fb268b8d4c5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b22cfe5a-b9a3-4e71-a552-7fb268b8d4c5": {"__data__": {"id_": "b22cfe5a-b9a3-4e71-a552-7fb268b8d4c5", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6c03beec-36cf-4c0b-b997-7e4b6bab09cf", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e7cb19a8-1fcd-4795-87f6-cb465efeb9d6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e7cb19a8-1fcd-4795-87f6-cb465efeb9d6": {"__data__": {"id_": "e7cb19a8-1fcd-4795-87f6-cb465efeb9d6", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b22cfe5a-b9a3-4e71-a552-7fb268b8d4c5", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2932113b-b43e-45f7-860c-fd6c431c4a07", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2932113b-b43e-45f7-860c-fd6c431c4a07": {"__data__": {"id_": "2932113b-b43e-45f7-860c-fd6c431c4a07", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e7cb19a8-1fcd-4795-87f6-cb465efeb9d6", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9a05bf37-1625-4711-be55-81a1380b787e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9a05bf37-1625-4711-be55-81a1380b787e": {"__data__": {"id_": "9a05bf37-1625-4711-be55-81a1380b787e", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2932113b-b43e-45f7-860c-fd6c431c4a07", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "85d2d350-ef7c-47f0-9b93-c6d8cca2594c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "85d2d350-ef7c-47f0-9b93-c6d8cca2594c": {"__data__": {"id_": "85d2d350-ef7c-47f0-9b93-c6d8cca2594c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9a05bf37-1625-4711-be55-81a1380b787e", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c8ed8d72-8a04-41ed-9fa4-81c5e773db9c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c8ed8d72-8a04-41ed-9fa4-81c5e773db9c": {"__data__": {"id_": "c8ed8d72-8a04-41ed-9fa4-81c5e773db9c", "embedding": null, "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "85d2d350-ef7c-47f0-9b93-c6d8cca2594c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". "}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b94db02f-479f-4c37-87d4-87c7c50170f0", "node_type": "1", "metadata": {"window": ". . . . . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. ", "original_text": ". "}, "hash": "e6a897b9060ed70a0989ee73a1f1c8d04294fd8888e896c326e57cf70d3b9ac8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b94db02f-479f-4c37-87d4-87c7c50170f0": {"__data__": {"id_": "b94db02f-479f-4c37-87d4-87c7c50170f0", "embedding": null, "metadata": {"window": ". . . . . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c8ed8d72-8a04-41ed-9fa4-81c5e773db9c", "node_type": "1", "metadata": {"window": ". . . . . . . . . ", "original_text": ". 
"}, "hash": "ac3e0553bf3fe304c29a68e50849cbb956a4e86b93e9d3b457a3100110b8086d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c1b894a9-7003-4f7a-a787-2b661cb3da7b", "node_type": "1", "metadata": {"window": ". . . . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. ", "original_text": ". "}, "hash": "ab637ef3177a66e61941fbdb7f35e5aebd20a3a572ebbfcb4cd1e7344b21545e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c1b894a9-7003-4f7a-a787-2b661cb3da7b": {"__data__": {"id_": "c1b894a9-7003-4f7a-a787-2b661cb3da7b", "embedding": null, "metadata": {"window": ". . . . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b94db02f-479f-4c37-87d4-87c7c50170f0", "node_type": "1", "metadata": {"window": ". . . . . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. ", "original_text": ". "}, "hash": "e6a897b9060ed70a0989ee73a1f1c8d04294fd8888e896c326e57cf70d3b9ac8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "16ea94b8-d9f8-477f-a23f-d638b9d09b34", "node_type": "1", "metadata": {"window": ". . . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. ", "original_text": ". "}, "hash": "c7d81cc9a10cad7dab9fc15244290ba6d2f11e82e47a0347029e72e477d618e1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". 
", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "16ea94b8-d9f8-477f-a23f-d638b9d09b34": {"__data__": {"id_": "16ea94b8-d9f8-477f-a23f-d638b9d09b34", "embedding": null, "metadata": {"window": ". . . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c1b894a9-7003-4f7a-a787-2b661cb3da7b", "node_type": "1", "metadata": {"window": ". . . . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. ", "original_text": ". "}, "hash": "ab637ef3177a66e61941fbdb7f35e5aebd20a3a572ebbfcb4cd1e7344b21545e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "83582d80-e9e8-4e4c-b926-96a556371f02", "node_type": "1", "metadata": {"window": ". . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. ", "original_text": ". "}, "hash": "db9666257e0e69f4189f10093dc156079068f168d196a4ec67ac77a4020b27ba", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "83582d80-e9e8-4e4c-b926-96a556371f02": {"__data__": {"id_": "83582d80-e9e8-4e4c-b926-96a556371f02", "embedding": null, "metadata": {"window": ". . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. 
Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. ", "original_text": ". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "16ea94b8-d9f8-477f-a23f-d638b9d09b34", "node_type": "1", "metadata": {"window": ". . . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. ", "original_text": ". "}, "hash": "c7d81cc9a10cad7dab9fc15244290ba6d2f11e82e47a0347029e72e477d618e1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4c0e1591-8a5b-459e-8bfe-b66a1fcf767c", "node_type": "1", "metadata": {"window": ". . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. ", "original_text": "27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. "}, "hash": "92b7ebd934315879d73bbe48fd4f523dd484622813141588eb66b01a00e25c2d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4c0e1591-8a5b-459e-8bfe-b66a1fcf767c": {"__data__": {"id_": "4c0e1591-8a5b-459e-8bfe-b66a1fcf767c", "embedding": null, "metadata": {"window": ". . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. 
Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. ", "original_text": "27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "83582d80-e9e8-4e4c-b926-96a556371f02", "node_type": "1", "metadata": {"window": ". . . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. ", "original_text": ". "}, "hash": "db9666257e0e69f4189f10093dc156079068f168d196a4ec67ac77a4020b27ba", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e5a8d498-6b22-4b49-985d-82c37585e9a3", "node_type": "1", "metadata": {"window": ". . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. ", "original_text": "Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. 
"}, "hash": "ed646248a86639a6156093ecaf1edf7531d3cfc470b243906710fb5a227e492c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. ", "mimetype": "text/plain", "start_char_idx": 1869, "end_char_idx": 2025, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e5a8d498-6b22-4b49-985d-82c37585e9a3": {"__data__": {"id_": "e5a8d498-6b22-4b49-985d-82c37585e9a3", "embedding": null, "metadata": {"window": ". . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. ", "original_text": "Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4c0e1591-8a5b-459e-8bfe-b66a1fcf767c", "node_type": "1", "metadata": {"window": ". . . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. ", "original_text": "27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. "}, "hash": "92b7ebd934315879d73bbe48fd4f523dd484622813141588eb66b01a00e25c2d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d682a81c-1f9a-4ca5-9ae5-9fcef30d40a7", "node_type": "1", "metadata": {"window": ". . 
27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. ", "original_text": "These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. "}, "hash": "f1cf65fb0a0067134e6ee20d12d0f1cdf4b996fb1fc8b9c756fcaeba456d2313", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. ", "mimetype": "text/plain", "start_char_idx": 2025, "end_char_idx": 2243, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d682a81c-1f9a-4ca5-9ae5-9fcef30d40a7": {"__data__": {"id_": "d682a81c-1f9a-4ca5-9ae5-9fcef30d40a7", "embedding": null, "metadata": {"window": ". . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. ", "original_text": "These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e5a8d498-6b22-4b49-985d-82c37585e9a3", "node_type": "1", "metadata": {"window": ". . . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. ", "original_text": "Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. "}, "hash": "ed646248a86639a6156093ecaf1edf7531d3cfc470b243906710fb5a227e492c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "886e17c4-6041-4933-98c8-6fc49b3c9174", "node_type": "1", "metadata": {"window": ". 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n", "original_text": "The colonization\nof the American continent led to a similar ecological catastrophe. "}, "hash": "0e6fe682d4568132fafa8aa486c80729bcd713a731cb787c024b2e8c3cdcb276", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. 
", "mimetype": "text/plain", "start_char_idx": 2243, "end_char_idx": 2467, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "886e17c4-6041-4933-98c8-6fc49b3c9174": {"__data__": {"id_": "886e17c4-6041-4933-98c8-6fc49b3c9174", "embedding": null, "metadata": {"window": ". 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n", "original_text": "The colonization\nof the American continent led to a similar ecological catastrophe. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d682a81c-1f9a-4ca5-9ae5-9fcef30d40a7", "node_type": "1", "metadata": {"window": ". . 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. ", "original_text": "These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. 
"}, "hash": "f1cf65fb0a0067134e6ee20d12d0f1cdf4b996fb1fc8b9c756fcaeba456d2313", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d2ea5f50-ce5d-4a38-9780-da61dc90ee12", "node_type": "1", "metadata": {"window": "27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. ", "original_text": "Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. "}, "hash": "350a824a8d5630e015d27b3698bf19a159796315af02c85b3b511af7ef479e05", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The colonization\nof the American continent led to a similar ecological catastrophe. ", "mimetype": "text/plain", "start_char_idx": 2467, "end_char_idx": 2551, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d2ea5f50-ce5d-4a38-9780-da61dc90ee12": {"__data__": {"id_": "d2ea5f50-ce5d-4a38-9780-da61dc90ee12", "embedding": null, "metadata": {"window": "27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. 
", "original_text": "Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "886e17c4-6041-4933-98c8-6fc49b3c9174", "node_type": "1", "metadata": {"window": ". 27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n", "original_text": "The colonization\nof the American continent led to a similar ecological catastrophe. "}, "hash": "0e6fe682d4568132fafa8aa486c80729bcd713a731cb787c024b2e8c3cdcb276", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "60f46972-4e33-46e3-8ecb-4d20e13c1a05", "node_type": "1", "metadata": {"window": "Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. 
", "original_text": "In less than two millennia, humans had reached\nthe continent\u2019s southern tip. "}, "hash": "73842178ecfa8b1b8937c994ce5d175692524d62dec0d197f45d1554d76231f8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. ", "mimetype": "text/plain", "start_char_idx": 2551, "end_char_idx": 2625, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "60f46972-4e33-46e3-8ecb-4d20e13c1a05": {"__data__": {"id_": "60f46972-4e33-46e3-8ecb-4d20e13c1a05", "embedding": null, "metadata": {"window": "Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. ", "original_text": "In less than two millennia, humans had reached\nthe continent\u2019s southern tip. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d2ea5f50-ce5d-4a38-9780-da61dc90ee12", "node_type": "1", "metadata": {"window": "27\n4 Conclusion 30\nBibliography 31\n1\n\nChapter 1\nIntroduction\nT\nhe history of Homo Sapiensas an ecological killer begins earlier than many people imag-\nine. Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. 
Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. ", "original_text": "Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. "}, "hash": "350a824a8d5630e015d27b3698bf19a159796315af02c85b3b511af7ef479e05", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a28da878-830d-4c9f-aa01-e3c48a9339ae", "node_type": "1", "metadata": {"window": "These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. ", "original_text": "Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. "}, "hash": "4b6a6c1144eeb461ae6dda03179528b5c008c1ab5ede51b7fec1d76c48ec89a1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In less than two millennia, humans had reached\nthe continent\u2019s southern tip. ", "mimetype": "text/plain", "start_char_idx": 2625, "end_char_idx": 2702, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a28da878-830d-4c9f-aa01-e3c48a9339ae": {"__data__": {"id_": "a28da878-830d-4c9f-aa01-e3c48a9339ae", "embedding": null, "metadata": {"window": "These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. 
While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. ", "original_text": "Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "60f46972-4e33-46e3-8ecb-4d20e13c1a05", "node_type": "1", "metadata": {"window": "Around 50,000 years ago, Homo Sapiens first set foot in Australia; within a few\nthousand years, of the twenty-four genera of Australian land animals weighing fifty kilo-\ngrams or more, twenty-three became extinct [1]. These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. ", "original_text": "In less than two millennia, humans had reached\nthe continent\u2019s southern tip. "}, "hash": "73842178ecfa8b1b8937c994ce5d175692524d62dec0d197f45d1554d76231f8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1575b6dd-14bf-4610-b839-01130495ea45", "node_type": "1", "metadata": {"window": "The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. 
While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. ", "original_text": "While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n"}, "hash": "5b458c253a01c763f26cfbb3ccf80aadd49520d79f1c18d2fbf8e96bf90f8dc8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. ", "mimetype": "text/plain", "start_char_idx": 2702, "end_char_idx": 2810, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1575b6dd-14bf-4610-b839-01130495ea45": {"__data__": {"id_": "1575b6dd-14bf-4610-b839-01130495ea45", "embedding": null, "metadata": {"window": "The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. 
", "original_text": "While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a28da878-830d-4c9f-aa01-e3c48a9339ae", "node_type": "1", "metadata": {"window": "These includedThylacoleo carnifex, a marsupial\nlion that was the continent\u2019s largest predator; Genyornis newtoni, a flightless bird over two me-\nters in height; and Procoptodon goliah, a giant kangaroo weighing over 200 kg. The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. ", "original_text": "Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. "}, "hash": "4b6a6c1144eeb461ae6dda03179528b5c008c1ab5ede51b7fec1d76c48ec89a1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "74ecfdce-10e1-4ffd-abfb-121f3f1b0c74", "node_type": "1", "metadata": {"window": "Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. 
The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n", "original_text": "Today, the biosphere is undergoing its 6th mass extinction [5, 6]. "}, "hash": "d29c86a9ba7ba1ae90895da31e37da9e860f7a65657cf4a5539522d4421cd642", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n", "mimetype": "text/plain", "start_char_idx": 2810, "end_char_idx": 3056, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "74ecfdce-10e1-4ffd-abfb-121f3f1b0c74": {"__data__": {"id_": "74ecfdce-10e1-4ffd-abfb-121f3f1b0c74", "embedding": null, "metadata": {"window": "Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n", "original_text": "Today, the biosphere is undergoing its 6th mass extinction [5, 6]. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1575b6dd-14bf-4610-b839-01130495ea45", "node_type": "1", "metadata": {"window": "The colonization\nof the American continent led to a similar ecological catastrophe. Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. 
A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. ", "original_text": "While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n"}, "hash": "5b458c253a01c763f26cfbb3ccf80aadd49520d79f1c18d2fbf8e96bf90f8dc8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "325d4916-0f12-45ff-b0f1-3a3941054eb7", "node_type": "1", "metadata": {"window": "In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. ", "original_text": "A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. "}, "hash": "059b3a30c7c32c2f6da6dc1b3bb829ba25f672f0d84b60fef3826169ca38d16d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Today, the biosphere is undergoing its 6th mass extinction [5, 6]. ", "mimetype": "text/plain", "start_char_idx": 3056, "end_char_idx": 3123, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "325d4916-0f12-45ff-b0f1-3a3941054eb7": {"__data__": {"id_": "325d4916-0f12-45ff-b0f1-3a3941054eb7", "embedding": null, "metadata": {"window": "In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. 
While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. ", "original_text": "A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "74ecfdce-10e1-4ffd-abfb-121f3f1b0c74", "node_type": "1", "metadata": {"window": "Homo Sapiensstarted migrat-\ning south from Alaska about 14,000 years ago. In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n", "original_text": "Today, the biosphere is undergoing its 6th mass extinction [5, 6]. "}, "hash": "d29c86a9ba7ba1ae90895da31e37da9e860f7a65657cf4a5539522d4421cd642", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "439f34f6-6d92-46e3-bdf6-715286b6bde2", "node_type": "1", "metadata": {"window": "Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. 
While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. ", "original_text": "In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. "}, "hash": "3d3bc0d474407fe12e2a454f5957c690e5187d56a6793fb7201312a4f57443d9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. ", "mimetype": "text/plain", "start_char_idx": 3123, "end_char_idx": 3388, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "439f34f6-6d92-46e3-bdf6-715286b6bde2": {"__data__": {"id_": "439f34f6-6d92-46e3-bdf6-715286b6bde2", "embedding": null, "metadata": {"window": "Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. 
They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. ", "original_text": "In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "325d4916-0f12-45ff-b0f1-3a3941054eb7", "node_type": "1", "metadata": {"window": "In less than two millennia, humans had reached\nthe continent\u2019s southern tip. Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. ", "original_text": "A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. "}, "hash": "059b3a30c7c32c2f6da6dc1b3bb829ba25f672f0d84b60fef3826169ca38d16d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fe741f39-589a-41f3-bedf-f7c0f392e16a", "node_type": "1", "metadata": {"window": "While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. 
The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. ", "original_text": "In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. "}, "hash": "58602806d109ade1d1bdc4497ab4fdf384ce94f84f5e1aa9ff38d77b02a7e50f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. ", "mimetype": "text/plain", "start_char_idx": 3388, "end_char_idx": 3494, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fe741f39-589a-41f3-bedf-f7c0f392e16a": {"__data__": {"id_": "fe741f39-589a-41f3-bedf-f7c0f392e16a", "embedding": null, "metadata": {"window": "While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. ", "original_text": "In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "439f34f6-6d92-46e3-bdf6-715286b6bde2", "node_type": "1", "metadata": {"window": "Concurrently, 72% and 83% of megafaunal genera become extinct\nin North and South America, respectively [2]. 
While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. ", "original_text": "In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. "}, "hash": "3d3bc0d474407fe12e2a454f5957c690e5187d56a6793fb7201312a4f57443d9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "93f78c61-9740-413d-b768-9912cd3161ef", "node_type": "1", "metadata": {"window": "Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. ", "original_text": "The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n"}, "hash": "addb794dcd219eda208d43a83acf08006fa6f6a1f0bc3ad8ab2b1695ceba435c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. 
", "mimetype": "text/plain", "start_char_idx": 3494, "end_char_idx": 3691, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "93f78c61-9740-413d-b768-9912cd3161ef": {"__data__": {"id_": "93f78c61-9740-413d-b768-9912cd3161ef", "embedding": null, "metadata": {"window": "Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. ", "original_text": "The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fe741f39-589a-41f3-bedf-f7c0f392e16a", "node_type": "1", "metadata": {"window": "While the relative importance between (natural)\nclimate change and humans is still debated, the latest research suggests that the latter were the\nprincipal or necessary driver of the major extinction events in Australia and South America\n[3, 4].\n Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. 
Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. ", "original_text": "In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. "}, "hash": "58602806d109ade1d1bdc4497ab4fdf384ce94f84f5e1aa9ff38d77b02a7e50f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "88eba853-64cb-46ff-9745-fc38c9666b66", "node_type": "1", "metadata": {"window": "A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. ", "original_text": "Insects are the most diverse group of animals. "}, "hash": "7c179654a658fb2569179e10da2fec9b83c0b1500ae73ca4e4a750b33c25c527", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n", "mimetype": "text/plain", "start_char_idx": 3691, "end_char_idx": 3829, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "88eba853-64cb-46ff-9745-fc38c9666b66": {"__data__": {"id_": "88eba853-64cb-46ff-9745-fc38c9666b66", "embedding": null, "metadata": {"window": "A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. 
The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. ", "original_text": "Insects are the most diverse group of animals. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "93f78c61-9740-413d-b768-9912cd3161ef", "node_type": "1", "metadata": {"window": "Today, the biosphere is undergoing its 6th mass extinction [5, 6]. A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. ", "original_text": "The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n"}, "hash": "addb794dcd219eda208d43a83acf08006fa6f6a1f0bc3ad8ab2b1695ceba435c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "805833df-bcea-4177-93d4-81a549b5b783", "node_type": "1", "metadata": {"window": "In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. 
The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n", "original_text": "They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. "}, "hash": "43c844742f93102b03a24feb395219e432f5905601ff84de4d5cb3bd3eef4ae7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Insects are the most diverse group of animals. ", "mimetype": "text/plain", "start_char_idx": 3829, "end_char_idx": 3876, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "805833df-bcea-4177-93d4-81a549b5b783": {"__data__": {"id_": "805833df-bcea-4177-93d4-81a549b5b783", "embedding": null, "metadata": {"window": "In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. 
Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n", "original_text": "They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "88eba853-64cb-46ff-9745-fc38c9666b66", "node_type": "1", "metadata": {"window": "A mass extinction occurs\nwhen the Earth loses more than three-quarters of its species in a geologically short interval [7];\nthe last one marked the end of the dinosaurs\u2019 era, 66 million years ago, and was caused by a\n10km-wide asteroid colliding with planet Earth. In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. ", "original_text": "Insects are the most diverse group of animals. "}, "hash": "7c179654a658fb2569179e10da2fec9b83c0b1500ae73ca4e4a750b33c25c527", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "452bc98c-b51c-48ec-b3ee-b380a6694e8c", "node_type": "1", "metadata": {"window": "In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. 
A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. ", "original_text": "Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. "}, "hash": "64c85fcb03d0212c5384268d370e9f49d2e43238b76101155a28a028a1355e1e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. ", "mimetype": "text/plain", "start_char_idx": 3876, "end_char_idx": 4080, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "452bc98c-b51c-48ec-b3ee-b380a6694e8c": {"__data__": {"id_": "452bc98c-b51c-48ec-b3ee-b380a6694e8c", "embedding": null, "metadata": {"window": "In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. ", "original_text": "Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "805833df-bcea-4177-93d4-81a549b5b783", "node_type": "1", "metadata": {"window": "In contrast with all the previous, today\u2019s mass\nextinction is mostly driven by one species, Homo Sapiens. In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n", "original_text": "They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. "}, "hash": "43c844742f93102b03a24feb395219e432f5905601ff84de4d5cb3bd3eef4ae7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d3df5068-0141-4827-b583-b773ae1f5d9d", "node_type": "1", "metadata": {"window": "The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. 
Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. ", "original_text": "However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. "}, "hash": "d66e0e4217f65dafd2d002b74ba0148b87fd9cc988494cdc70ecbb932f94c49c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. ", "mimetype": "text/plain", "start_char_idx": 4080, "end_char_idx": 4183, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d3df5068-0141-4827-b583-b773ae1f5d9d": {"__data__": {"id_": "d3df5068-0141-4827-b583-b773ae1f5d9d", "embedding": null, "metadata": {"window": "The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. ", "original_text": "However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "452bc98c-b51c-48ec-b3ee-b380a6694e8c", "node_type": "1", "metadata": {"window": "In fact, humans are responsible for\nall the major immediate causes of biotic destruction: habitat conversion, climate disruption,\noverexploitation, toxification, species invasions and disease [5]. The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. ", "original_text": "Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. "}, "hash": "64c85fcb03d0212c5384268d370e9f49d2e43238b76101155a28a028a1355e1e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f2720dc3-4669-4a19-8931-0cb9f4aedfc8", "node_type": "1", "metadata": {"window": "Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. 
As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. ", "original_text": "A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. "}, "hash": "2a2344023effda908fc40ddecb793ab8a2b5939303bac71b9178cba02b57bce2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. ", "mimetype": "text/plain", "start_char_idx": 4183, "end_char_idx": 4325, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f2720dc3-4669-4a19-8931-0cb9f4aedfc8": {"__data__": {"id_": "f2720dc3-4669-4a19-8931-0cb9f4aedfc8", "embedding": null, "metadata": {"window": "Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. ", "original_text": "A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d3df5068-0141-4827-b583-b773ae1f5d9d", "node_type": "1", "metadata": {"window": "The extinctions span numer-\nous species of plants [8] and animals, including mammals, birds, reptiles, amphibians, fish, and\ninsects [9].\n Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. ", "original_text": "However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. "}, "hash": "d66e0e4217f65dafd2d002b74ba0148b87fd9cc988494cdc70ecbb932f94c49c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "edaefa7f-3e0f-4197-bd2f-2a21bc6c5825", "node_type": "1", "metadata": {"window": "They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. 
As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. ", "original_text": "Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n"}, "hash": "dd9082f15f8096381b1e8472f7030328e8be414d19c67e03851d5a071507c008", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. ", "mimetype": "text/plain", "start_char_idx": 4325, "end_char_idx": 4616, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "edaefa7f-3e0f-4197-bd2f-2a21bc6c5825": {"__data__": {"id_": "edaefa7f-3e0f-4197-bd2f-2a21bc6c5825", "embedding": null, "metadata": {"window": "They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. 
", "original_text": "Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f2720dc3-4669-4a19-8931-0cb9f4aedfc8", "node_type": "1", "metadata": {"window": "Insects are the most diverse group of animals. They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. ", "original_text": "A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. "}, "hash": "2a2344023effda908fc40ddecb793ab8a2b5939303bac71b9178cba02b57bce2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6b94705c-6231-414b-8837-306425a652e2", "node_type": "1", "metadata": {"window": "Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. 
Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n", "original_text": "2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. "}, "hash": "c6f7cbc28215bb6800fc645100395bdd5c16847f0fd6a7022404e6ee238266d9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n", "mimetype": "text/plain", "start_char_idx": 4616, "end_char_idx": 4789, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6b94705c-6231-414b-8837-306425a652e2": {"__data__": {"id_": "6b94705c-6231-414b-8837-306425a652e2", "embedding": null, "metadata": {"window": "Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. 
Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n", "original_text": "2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "edaefa7f-3e0f-4197-bd2f-2a21bc6c5825", "node_type": "1", "metadata": {"window": "They include more than a million described\nspecies and represent more than half of all known living organisms [10]; moreover, it is esti-\nmated that 80% of insect species are still to be discovered [11]. Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. ", "original_text": "Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n"}, "hash": "dd9082f15f8096381b1e8472f7030328e8be414d19c67e03851d5a071507c008", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "db3e8b23-f911-483b-907c-e2034c2c6619", "node_type": "1", "metadata": {"window": "However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. 
Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. ", "original_text": "As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. "}, "hash": "b9aa978c584594660fd10e2d298734d9130583a73fbec914b840fa11f44836b1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. ", "mimetype": "text/plain", "start_char_idx": 4789, "end_char_idx": 5020, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "db3e8b23-f911-483b-907c-e2034c2c6619": {"__data__": {"id_": "db3e8b23-f911-483b-907c-e2034c2c6619", "embedding": null, "metadata": {"window": "However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. 
Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. ", "original_text": "As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6b94705c-6231-414b-8837-306425a652e2", "node_type": "1", "metadata": {"window": "Recent studies have demon-\nstrated alarming rates of insect diversity and abundance loss [12, 13, 14]. However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n", "original_text": "2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. 
"}, "hash": "c6f7cbc28215bb6800fc645100395bdd5c16847f0fd6a7022404e6ee238266d9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "444806d2-21e8-4dac-81da-2d64fb82874d", "node_type": "1", "metadata": {"window": "A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. ", "original_text": "Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. "}, "hash": "8d31b0ef65322abc1c72fc7f4926115a1b7e71b2b1e2081e5ed507b8cff5344d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. ", "mimetype": "text/plain", "start_char_idx": 5020, "end_char_idx": 5217, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "444806d2-21e8-4dac-81da-2d64fb82874d": {"__data__": {"id_": "444806d2-21e8-4dac-81da-2d64fb82874d", "embedding": null, "metadata": {"window": "A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. 
Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. ", "original_text": "Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "db3e8b23-f911-483b-907c-e2034c2c6619", "node_type": "1", "metadata": {"window": "However, the data\non changes in insect species diversity and abundance has substantial taxonomic, spatial, and\ntemporal biases and gaps [15]. A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. 
Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. ", "original_text": "As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. "}, "hash": "b9aa978c584594660fd10e2d298734d9130583a73fbec914b840fa11f44836b1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4738d7db-d5a4-418b-87d8-d339f43aaa9d", "node_type": "1", "metadata": {"window": "Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. ", "original_text": "Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. "}, "hash": "f43bfafe3cf994c1379433990d95bf60ca14c9f72908a623aa20180da9081463", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. 
", "mimetype": "text/plain", "start_char_idx": 5217, "end_char_idx": 5360, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4738d7db-d5a4-418b-87d8-d339f43aaa9d": {"__data__": {"id_": "4738d7db-d5a4-418b-87d8-d339f43aaa9d", "embedding": null, "metadata": {"window": "Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. ", "original_text": "Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "444806d2-21e8-4dac-81da-2d64fb82874d", "node_type": "1", "metadata": {"window": "A major reason for these short-falls is the inherent difficulty\nof identifying insects: expert knowledge is necessary to classify all insects collected in a trap,\nwhich makes this approach time- or cost-prohibitive to scale, especially as insect identification\nexpertise is in decline [16]. Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. 
Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. ", "original_text": "Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. "}, "hash": "8d31b0ef65322abc1c72fc7f4926115a1b7e71b2b1e2081e5ed507b8cff5344d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9492e792-71ba-48a9-ad92-d107fec88cd4", "node_type": "1", "metadata": {"window": "2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. ", "original_text": "Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n"}, "hash": "77706e6fd3b7e9d664dbc8b777a2b604214424a5edb829555b48bd339c2601de", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. 
", "mimetype": "text/plain", "start_char_idx": 5360, "end_char_idx": 5511, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9492e792-71ba-48a9-ad92-d107fec88cd4": {"__data__": {"id_": "9492e792-71ba-48a9-ad92-d107fec88cd4", "embedding": null, "metadata": {"window": "2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. ", "original_text": "Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4738d7db-d5a4-418b-87d8-d339f43aaa9d", "node_type": "1", "metadata": {"window": "Using indicator species can be an effective approach to sidestep this\nproblem, but doing so often results in inadequate knowledge and compromised measures of\ninterest [17].\n 2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. 
Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. ", "original_text": "Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. "}, "hash": "f43bfafe3cf994c1379433990d95bf60ca14c9f72908a623aa20180da9081463", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2cae41a0-1340-4553-8c9c-2bea5804282e", "node_type": "1", "metadata": {"window": "As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. ", "original_text": "As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. 
"}, "hash": "b2a5b75b1fe05b11b1e8c241312136e431d5a5a8b94d3a304b7182aef81c2a7a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n", "mimetype": "text/plain", "start_char_idx": 5511, "end_char_idx": 5699, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2cae41a0-1340-4553-8c9c-2bea5804282e": {"__data__": {"id_": "2cae41a0-1340-4553-8c9c-2bea5804282e", "embedding": null, "metadata": {"window": "As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. ", "original_text": "As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9492e792-71ba-48a9-ad92-d107fec88cd4", "node_type": "1", "metadata": {"window": "2\n\nIn the last decade, another approach has emerged in species monitoring studies in order to\ndrastically reduce the dependence on manual labor by automatically processing large amounts\nof collected data: the use of deep learning. As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. 
Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. ", "original_text": "Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n"}, "hash": "77706e6fd3b7e9d664dbc8b777a2b604214424a5edb829555b48bd339c2601de", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "821d7380-66d7-4bda-bee8-5a015eb5274b", "node_type": "1", "metadata": {"window": "Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. ", "original_text": "A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. 
"}, "hash": "a50101756704dae34bcfdc115e880948f75292ed94609bb92cfda093d61552e1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. ", "mimetype": "text/plain", "start_char_idx": 5699, "end_char_idx": 5976, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "821d7380-66d7-4bda-bee8-5a015eb5274b": {"__data__": {"id_": "821d7380-66d7-4bda-bee8-5a015eb5274b", "embedding": null, "metadata": {"window": "Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. ", "original_text": "A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2cae41a0-1340-4553-8c9c-2bea5804282e", "node_type": "1", "metadata": {"window": "As part of this trend, in 2021, a team of researchers\nin Denmark published their work on a novel, automatic light trap to monitor moths using\ncomputer vision-based tracking and deep learning [18]. Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. 
Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. ", "original_text": "As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. "}, "hash": "b2a5b75b1fe05b11b1e8c241312136e431d5a5a8b94d3a304b7182aef81c2a7a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "64948313-8678-4ec7-9b72-e4efde89c47e", "node_type": "1", "metadata": {"window": "Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. ", "original_text": "On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. 
"}, "hash": "1ac6187563cf16ce6b9c4bb4aa754ea55cc569ff5f2b5d643b0ee6866fdac11e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. ", "mimetype": "text/plain", "start_char_idx": 5976, "end_char_idx": 6133, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "64948313-8678-4ec7-9b72-e4efde89c47e": {"__data__": {"id_": "64948313-8678-4ec7-9b72-e4efde89c47e", "embedding": null, "metadata": {"window": "Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. ", "original_text": "On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "821d7380-66d7-4bda-bee8-5a015eb5274b", "node_type": "1", "metadata": {"window": "Moths make up the vast majority of\nthe orderLepidoptera, which by itself accounts for around 10% of all described species of living\norganisms. Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. 
Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. ", "original_text": "A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. "}, "hash": "a50101756704dae34bcfdc115e880948f75292ed94609bb92cfda093d61552e1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3014e910-a107-44bd-96f1-01ca21a4162e", "node_type": "1", "metadata": {"window": "Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n", "original_text": "The images\nare processed off-line through a pipeline that involves four steps. "}, "hash": "282eae1cd6b028ec44fd3953e83c127179e677cea90d9eea1d507f1c60658090", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. 
", "mimetype": "text/plain", "start_char_idx": 6133, "end_char_idx": 6241, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3014e910-a107-44bd-96f1-01ca21a4162e": {"__data__": {"id_": "3014e910-a107-44bd-96f1-01ca21a4162e", "embedding": null, "metadata": {"window": "Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n", "original_text": "The images\nare processed off-line through a pipeline that involves four steps. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "64948313-8678-4ec7-9b72-e4efde89c47e", "node_type": "1", "metadata": {"window": "Moths are important as pollinators, herbivores and prey; as such, changes in the\nabundance of moths could have cascading effects through the food web. Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. 
First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. ", "original_text": "On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. "}, "hash": "1ac6187563cf16ce6b9c4bb4aa754ea55cc569ff5f2b5d643b0ee6866fdac11e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d6446025-b127-41e4-8ca6-bdb54cb306e7", "node_type": "1", "metadata": {"window": "As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. ", "original_text": "First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. "}, "hash": "0c469989f08997b6252ecf32781ae7b9395e959f8624fb2c3acac60b0627fe6c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The images\nare processed off-line through a pipeline that involves four steps. ", "mimetype": "text/plain", "start_char_idx": 6241, "end_char_idx": 6320, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d6446025-b127-41e4-8ca6-bdb54cb306e7": {"__data__": {"id_": "d6446025-b127-41e4-8ca6-bdb54cb306e7", "embedding": null, "metadata": {"window": "As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. 
A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. ", "original_text": "First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3014e910-a107-44bd-96f1-01ca21a4162e", "node_type": "1", "metadata": {"window": "Additionally, some of\nthe most damaging pest species in agriculture and forestry are also moths [19, 20], suggesting\nthey are a very relevant group of insects to monitor more effectively.\n As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n", "original_text": "The images\nare processed off-line through a pipeline that involves four steps. 
"}, "hash": "282eae1cd6b028ec44fd3953e83c127179e677cea90d9eea1d507f1c60658090", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b46503b1-bae2-491a-9ff0-68e8a87e97d1", "node_type": "1", "metadata": {"window": "A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. ", "original_text": "Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. "}, "hash": "0f8b576908f2ff7f09e7d9d72194e01eac2da2a861e533f5b1cf4b3215e68e24", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. ", "mimetype": "text/plain", "start_char_idx": 6320, "end_char_idx": 6560, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b46503b1-bae2-491a-9ff0-68e8a87e97d1": {"__data__": {"id_": "b46503b1-bae2-491a-9ff0-68e8a87e97d1", "embedding": null, "metadata": {"window": "A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. 
The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. ", "original_text": "Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d6446025-b127-41e4-8ca6-bdb54cb306e7", "node_type": "1", "metadata": {"window": "As depicted in Fig.1.1, the system presented in [18] consists of a UV light to attract live moths\nduring night hours, a backlit white screen for the moths to rest on, a high-resolution web cam-\nera with a light ring, a computer and a powered junction box with DC-DC converter. A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. ", "original_text": "First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. "}, "hash": "0c469989f08997b6252ecf32781ae7b9395e959f8624fb2c3acac60b0627fe6c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7e9d71a0-bdfd-4bc2-a449-a4e229fe2d2e", "node_type": "1", "metadata": {"window": "On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. 
Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n", "original_text": "Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. "}, "hash": "38aacfc1aa80eb54716d969288efb91cdf788bfcde69431c76172ac1a0d5a564", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. ", "mimetype": "text/plain", "start_char_idx": 6560, "end_char_idx": 6678, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7e9d71a0-bdfd-4bc2-a449-a4e229fe2d2e": {"__data__": {"id_": "7e9d71a0-bdfd-4bc2-a449-a4e229fe2d2e", "embedding": null, "metadata": {"window": "On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n", "original_text": "Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b46503b1-bae2-491a-9ff0-68e8a87e97d1", "node_type": "1", "metadata": {"window": "A se-\nquence of images is captured and stored on a hard drive whenever a change within the cam-\nera field of view is detected by the computer vision system. On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. 
First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. ", "original_text": "Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. "}, "hash": "0f8b576908f2ff7f09e7d9d72194e01eac2da2a861e533f5b1cf4b3215e68e24", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "38af156f-141a-42f4-b5a7-a06e5cc94324", "node_type": "1", "metadata": {"window": "The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. ", "original_text": "Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n"}, "hash": "eb132a180a2797cf926198b67a1d88f45c209d293ade55c24df87fc58da896c8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. ", "mimetype": "text/plain", "start_char_idx": 6678, "end_char_idx": 6877, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "38af156f-141a-42f4-b5a7-a06e5cc94324": {"__data__": {"id_": "38af156f-141a-42f4-b5a7-a06e5cc94324", "embedding": null, "metadata": {"window": "The images\nare processed off-line through a pipeline that involves four steps. 
First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. ", "original_text": "Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7e9d71a0-bdfd-4bc2-a449-a4e229fe2d2e", "node_type": "1", "metadata": {"window": "On warm summer nights with\na high level of insect activity, more than 20,000 images are captured per night. The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n", "original_text": "Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. "}, "hash": "38aacfc1aa80eb54716d969288efb91cdf788bfcde69431c76172ac1a0d5a564", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "feef5e1f-17a7-4f08-bc6e-d44dacd74466", "node_type": "1", "metadata": {"window": "First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. 
Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. ", "original_text": "Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. "}, "hash": "5a4e3b453cf3d06d4315d60d2443c437bdc808f508189b5ded547ea048613b19", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n", "mimetype": "text/plain", "start_char_idx": 6877, "end_char_idx": 6966, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "feef5e1f-17a7-4f08-bc6e-d44dacd74466": {"__data__": {"id_": "feef5e1f-17a7-4f08-bc6e-d44dacd74466", "embedding": null, "metadata": {"window": "First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. ", "original_text": "Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "38af156f-141a-42f4-b5a7-a06e5cc94324", "node_type": "1", "metadata": {"window": "The images\nare processed off-line through a pipeline that involves four steps. First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. ", "original_text": "Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n"}, "hash": "eb132a180a2797cf926198b67a1d88f45c209d293ade55c24df87fc58da896c8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6e440c97-48ed-4abe-a86e-cc2f85db3302", "node_type": "1", "metadata": {"window": "Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. ", "original_text": "The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. 
"}, "hash": "854ae2856e43d9eb2a972dfbc984c27ce4eaf10c6bec946e6ab489f32154ff37", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. ", "mimetype": "text/plain", "start_char_idx": 6966, "end_char_idx": 7092, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6e440c97-48ed-4abe-a86e-cc2f85db3302": {"__data__": {"id_": "6e440c97-48ed-4abe-a86e-cc2f85db3302", "embedding": null, "metadata": {"window": "Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. ", "original_text": "The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "feef5e1f-17a7-4f08-bc6e-d44dacd74466", "node_type": "1", "metadata": {"window": "First, object detection is\nperformed with classic computer vision techniques: Otsu\u2019s method to separate foreground\nfrom background, morphological operations to filter out small noisy blobs and close blobs,\nand connected-component labeling. Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. 
From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. ", "original_text": "Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. "}, "hash": "5a4e3b453cf3d06d4315d60d2443c437bdc808f508189b5ded547ea048613b19", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a847bbfb-3bdd-4a42-9f70-93f7c3e01045", "node_type": "1", "metadata": {"window": "Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. ", "original_text": "From [18].\n"}, "hash": "b24f1069a1e3ac69ebc62d4523a01696120a47a00a64fa5f96e0c488520b6f89", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. ", "mimetype": "text/plain", "start_char_idx": 7092, "end_char_idx": 7236, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a847bbfb-3bdd-4a42-9f70-93f7c3e01045": {"__data__": {"id_": "a847bbfb-3bdd-4a42-9f70-93f7c3e01045", "embedding": null, "metadata": {"window": "Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. 
Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. ", "original_text": "From [18].\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6e440c97-48ed-4abe-a86e-cc2f85db3302", "node_type": "1", "metadata": {"window": "Second, tracking is used to ensure that each insect is only\ncounted once during its stay in the camera field of view. Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. ", "original_text": "The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. "}, "hash": "854ae2856e43d9eb2a972dfbc984c27ce4eaf10c6bec946e6ab489f32154ff37", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c5599d78-d420-4c6c-b2cb-34664566ea98", "node_type": "1", "metadata": {"window": "Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. 
Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. ", "original_text": "Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. "}, "hash": "499443794ccedb84fa8660e19122219b33843657291d3f11b3422db4fb1439ee", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "From [18].\n", "mimetype": "text/plain", "start_char_idx": 7236, "end_char_idx": 7247, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c5599d78-d420-4c6c-b2cb-34664566ea98": {"__data__": {"id_": "c5599d78-d420-4c6c-b2cb-34664566ea98", "embedding": null, "metadata": {"window": "Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. ", "original_text": "Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a847bbfb-3bdd-4a42-9f70-93f7c3e01045", "node_type": "1", "metadata": {"window": "Third, each insect track is classified\nthrough a CNN into ten different classes, representing frequently observed species, groups of\nvery similar species, or false object detections without insects. Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. 
From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. ", "original_text": "From [18].\n"}, "hash": "b24f1069a1e3ac69ebc62d4523a01696120a47a00a64fa5f96e0c488520b6f89", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "04e5553b-1e36-4398-a203-82cab3a97862", "node_type": "1", "metadata": {"window": "Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. ", "original_text": "Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. "}, "hash": "b8b804108ef142ec32598ee6bd986a67e1735c70af42dd67ed98fdf84a9e5ceb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. ", "mimetype": "text/plain", "start_char_idx": 7247, "end_char_idx": 7434, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "04e5553b-1e36-4398-a203-82cab3a97862": {"__data__": {"id_": "04e5553b-1e36-4398-a203-82cab3a97862", "embedding": null, "metadata": {"window": "Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. 
Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. ", "original_text": "Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c5599d78-d420-4c6c-b2cb-34664566ea98", "node_type": "1", "metadata": {"window": "Fourth, a summary of the indi-\nviduals detected and tracked by the algorithm is derived.\n Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. ", "original_text": "Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. "}, "hash": "499443794ccedb84fa8660e19122219b33843657291d3f11b3422db4fb1439ee", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ded6cc16-9f03-48dc-b90b-a5b5f3b03ebb", "node_type": "1", "metadata": {"window": "The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. 
Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n", "original_text": "Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. "}, "hash": "baa89403c84e989c8a3b155af98f5c2ac68fcab6997f1795efb80ea0d301a674", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. ", "mimetype": "text/plain", "start_char_idx": 7434, "end_char_idx": 7597, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ded6cc16-9f03-48dc-b90b-a5b5f3b03ebb": {"__data__": {"id_": "ded6cc16-9f03-48dc-b90b-a5b5f3b03ebb", "embedding": null, "metadata": {"window": "The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n", "original_text": "Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "04e5553b-1e36-4398-a203-82cab3a97862", "node_type": "1", "metadata": {"window": "Figure 1.1: The portable light trap with a light table, a white sheet, and UV light to attract live\nmoths during night hours. The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. ", "original_text": "Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. "}, "hash": "b8b804108ef142ec32598ee6bd986a67e1735c70af42dd67ed98fdf84a9e5ceb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "499f9f47-a0fd-4700-8766-2142378586f3", "node_type": "1", "metadata": {"window": "From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. ", "original_text": "Nonetheless, there is vast room for improvement. 
"}, "hash": "e8ff644bb6067e791fca938b2a80c79441cf0df37c109ce03c9a66610c788f69", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. ", "mimetype": "text/plain", "start_char_idx": 7597, "end_char_idx": 7855, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "499f9f47-a0fd-4700-8766-2142378586f3": {"__data__": {"id_": "499f9f47-a0fd-4700-8766-2142378586f3", "embedding": null, "metadata": {"window": "From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. ", "original_text": "Nonetheless, there is vast room for improvement. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ded6cc16-9f03-48dc-b90b-a5b5f3b03ebb", "node_type": "1", "metadata": {"window": "The computer vision system consists of a light ring, a camera with\na computer and electronics, and a powered junction box with DC-DC converter. From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. 
David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n", "original_text": "Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. "}, "hash": "baa89403c84e989c8a3b155af98f5c2ac68fcab6997f1795efb80ea0d301a674", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "17b6d99e-9d9b-4f01-9dc2-1678807a7542", "node_type": "1", "metadata": {"window": "Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. ", "original_text": "Around\nthe same time the research in [18] was published, prof. "}, "hash": "fca603c71521c44d13e08cc2697762192fce57cc7a9002b4bcf468e3e72d1791", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Nonetheless, there is vast room for improvement. ", "mimetype": "text/plain", "start_char_idx": 7855, "end_char_idx": 7904, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "17b6d99e-9d9b-4f01-9dc2-1678807a7542": {"__data__": {"id_": "17b6d99e-9d9b-4f01-9dc2-1678807a7542", "embedding": null, "metadata": {"window": "Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. 
David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. ", "original_text": "Around\nthe same time the research in [18] was published, prof. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "499f9f47-a0fd-4700-8766-2142378586f3", "node_type": "1", "metadata": {"window": "From [18].\n Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. ", "original_text": "Nonetheless, there is vast room for improvement. "}, "hash": "e8ff644bb6067e791fca938b2a80c79441cf0df37c109ce03c9a66610c788f69", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "84361864-b821-4556-a1a5-a31862178d2e", "node_type": "1", "metadata": {"window": "Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. 
In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. ", "original_text": "David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. "}, "hash": "55e1411a5a3c0ac2fe8da38b145846810be349d98acab78cdee18b8ffb3d2982", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Around\nthe same time the research in [18] was published, prof. ", "mimetype": "text/plain", "start_char_idx": 7904, "end_char_idx": 7967, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "84361864-b821-4556-a1a5-a31862178d2e": {"__data__": {"id_": "84361864-b821-4556-a1a5-a31862178d2e", "embedding": null, "metadata": {"window": "Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. ", "original_text": "David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "17b6d99e-9d9b-4f01-9dc2-1678807a7542", "node_type": "1", "metadata": {"window": "Naturally, moths that fly in and out of the camera field of view will be counted multiple times;\nhowever, it is noted that moths tend to be rather stationary once they land on the sheet. 
Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. ", "original_text": "Around\nthe same time the research in [18] was published, prof. "}, "hash": "fca603c71521c44d13e08cc2697762192fce57cc7a9002b4bcf468e3e72d1791", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f21722f3-752f-4e76-939b-1429ee6d8f0b", "node_type": "1", "metadata": {"window": "Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n", "original_text": "In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n"}, "hash": "51261518d02259e68c5a15e7cf41c21f8ed92f08f0a0eca2abcdc5bc38c35548", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. 
", "mimetype": "text/plain", "start_char_idx": 7967, "end_char_idx": 8104, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f21722f3-752f-4e76-939b-1429ee6d8f0b": {"__data__": {"id_": "f21722f3-752f-4e76-939b-1429ee6d8f0b", "embedding": null, "metadata": {"window": "Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n", "original_text": "In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "84361864-b821-4556-a1a5-a31862178d2e", "node_type": "1", "metadata": {"window": "Ad-\n3\n\nditionally, supposing that the average number of visits per individual is fixed over time, this\ndoesn\u2019t prevent from observing trends in species abundance. Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. 
", "original_text": "David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. "}, "hash": "55e1411a5a3c0ac2fe8da38b145846810be349d98acab78cdee18b8ffb3d2982", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5c77ea63-5345-445f-98ad-bc69576269e2", "node_type": "1", "metadata": {"window": "Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. ", "original_text": "One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. "}, "hash": "30b3debab4e4af89382e741fedaf01e6e6976da684751bff2aae3ed9a4adb4fb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n", "mimetype": "text/plain", "start_char_idx": 8104, "end_char_idx": 8286, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5c77ea63-5345-445f-98ad-bc69576269e2": {"__data__": {"id_": "5c77ea63-5345-445f-98ad-bc69576269e2", "embedding": null, "metadata": {"window": "Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. ", "original_text": "One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f21722f3-752f-4e76-939b-1429ee6d8f0b", "node_type": "1", "metadata": {"window": "Hence, the system should be\nconsidered as a viable alternative to traditional methods that typically require tedious manual\nlabor (i.e., visiting the trap several times in a season for observation) and often result in the\nkilling of rare species of insects. Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n", "original_text": "In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n"}, "hash": "51261518d02259e68c5a15e7cf41c21f8ed92f08f0a0eca2abcdc5bc38c35548", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "969a5fde-1aa8-49c0-9d42-1278d227edf3", "node_type": "1", "metadata": {"window": "Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. ", "original_text": "As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. 
"}, "hash": "3b2f07149b665f1b38236655db170f57a36e5e75246d92538c67b39c09301a52", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. ", "mimetype": "text/plain", "start_char_idx": 8286, "end_char_idx": 8441, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "969a5fde-1aa8-49c0-9d42-1278d227edf3": {"__data__": {"id_": "969a5fde-1aa8-49c0-9d42-1278d227edf3", "embedding": null, "metadata": {"window": "Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. ", "original_text": "As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5c77ea63-5345-445f-98ad-bc69576269e2", "node_type": "1", "metadata": {"window": "Nonetheless, there is vast room for improvement. Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. 
", "original_text": "One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. "}, "hash": "30b3debab4e4af89382e741fedaf01e6e6976da684751bff2aae3ed9a4adb4fb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "30dd1149-5ef6-4b19-9a04-1765c33e6357", "node_type": "1", "metadata": {"window": "David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). ", "original_text": "Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. "}, "hash": "0302590233149aa647fff0b394c416ea288535bdcb143e9a328dcd5f4c41c21f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. ", "mimetype": "text/plain", "start_char_idx": 8441, "end_char_idx": 8539, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "30dd1149-5ef6-4b19-9a04-1765c33e6357": {"__data__": {"id_": "30dd1149-5ef6-4b19-9a04-1765c33e6357", "embedding": null, "metadata": {"window": "David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. 
A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). ", "original_text": "Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "969a5fde-1aa8-49c0-9d42-1278d227edf3", "node_type": "1", "metadata": {"window": "Around\nthe same time the research in [18] was published, prof. David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. ", "original_text": "As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. "}, "hash": "3b2f07149b665f1b38236655db170f57a36e5e75246d92538c67b39c09301a52", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4a5afb10-541b-4ba5-bc50-08d570df9b4a", "node_type": "1", "metadata": {"window": "In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. 
A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. ", "original_text": "However, the performance of the model after\ndeployment dropped, for multiple reasons.\n"}, "hash": "384e5bcf5af78f7b161498883409bf23cb4276d625c5cea3bace0222e2ffbdba", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. ", "mimetype": "text/plain", "start_char_idx": 8539, "end_char_idx": 8717, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4a5afb10-541b-4ba5-bc50-08d570df9b4a": {"__data__": {"id_": "4a5afb10-541b-4ba5-bc50-08d570df9b4a", "embedding": null, "metadata": {"window": "In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. ", "original_text": "However, the performance of the model after\ndeployment dropped, for multiple reasons.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "30dd1149-5ef6-4b19-9a04-1765c33e6357", "node_type": "1", "metadata": {"window": "David Rolnick got involved with the\nproject, and created a team at Mila\u2014of which I was part for this thesis\u2014to work on this chal-\nlenge. 
In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). ", "original_text": "Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. "}, "hash": "0302590233149aa647fff0b394c416ea288535bdcb143e9a328dcd5f4c41c21f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f2700c23-4a8b-4b21-a0e3-fb643fa832bd", "node_type": "1", "metadata": {"window": "One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. ", "original_text": "One reason was the presence of insect species outside the training dataset. 
"}, "hash": "e1e655c80aadb885b1ce77e17fa6284f024c3273a66a7173946bf505668cd126", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "However, the performance of the model after\ndeployment dropped, for multiple reasons.\n", "mimetype": "text/plain", "start_char_idx": 8717, "end_char_idx": 8803, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f2700c23-4a8b-4b21-a0e3-fb643fa832bd": {"__data__": {"id_": "f2700c23-4a8b-4b21-a0e3-fb643fa832bd", "embedding": null, "metadata": {"window": "One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. ", "original_text": "One reason was the presence of insect species outside the training dataset. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4a5afb10-541b-4ba5-bc50-08d570df9b4a", "node_type": "1", "metadata": {"window": "In the following paragraphs, I give a brief overview of the main limitations of the system\nas presented in [18], as well as the corresponding solution developed by the team at Mila.\n One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. 
The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. ", "original_text": "However, the performance of the model after\ndeployment dropped, for multiple reasons.\n"}, "hash": "384e5bcf5af78f7b161498883409bf23cb4276d625c5cea3bace0222e2ffbdba", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0406f467-70ca-4e52-a125-5b56713e3d32", "node_type": "1", "metadata": {"window": "As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n", "original_text": "A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. "}, "hash": "ead2bcdaef40887576da026e87f3fb7fd6e6d303a22da92db20bdc5d79a3d47e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "One reason was the presence of insect species outside the training dataset. ", "mimetype": "text/plain", "start_char_idx": 8803, "end_char_idx": 8879, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0406f467-70ca-4e52-a125-5b56713e3d32": {"__data__": {"id_": "0406f467-70ca-4e52-a125-5b56713e3d32", "embedding": null, "metadata": {"window": "As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. 
The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n", "original_text": "A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f2700c23-4a8b-4b21-a0e3-fb643fa832bd", "node_type": "1", "metadata": {"window": "One major difficulty in the project was to obtain enough training data for the classifier, espe-\ncially as some of the target species had few occurrences. As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. ", "original_text": "One reason was the presence of insect species outside the training dataset. "}, "hash": "e1e655c80aadb885b1ce77e17fa6284f024c3273a66a7173946bf505668cd126", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b6bfa95e-9911-483c-8cab-301b588a3d30", "node_type": "1", "metadata": {"window": "Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. 
The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. ", "original_text": "The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). "}, "hash": "f2713c5ea5269288b1e6d1a35f8c97b60f2b1ca3a6122921de67c98fa9df8a79", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. ", "mimetype": "text/plain", "start_char_idx": 8879, "end_char_idx": 9097, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b6bfa95e-9911-483c-8cab-301b588a3d30": {"__data__": {"id_": "b6bfa95e-9911-483c-8cab-301b588a3d30", "embedding": null, "metadata": {"window": "Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. ", "original_text": "The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0406f467-70ca-4e52-a125-5b56713e3d32", "node_type": "1", "metadata": {"window": "As the team in [18] resolved to have a\nbalanced dataset, only 250 images per class were selected. Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n", "original_text": "A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. "}, "hash": "ead2bcdaef40887576da026e87f3fb7fd6e6d303a22da92db20bdc5d79a3d47e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f46ef4dc-a8ad-4996-b30a-a4d1437349a1", "node_type": "1", "metadata": {"window": "However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. 
", "original_text": "Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. "}, "hash": "799093ea4e045154e8d9946844203d1dd9671c5457c356850ecf70df100b392a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). ", "mimetype": "text/plain", "start_char_idx": 9097, "end_char_idx": 9268, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f46ef4dc-a8ad-4996-b30a-a4d1437349a1": {"__data__": {"id_": "f46ef4dc-a8ad-4996-b30a-a4d1437349a1", "embedding": null, "metadata": {"window": "However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. ", "original_text": "Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b6bfa95e-9911-483c-8cab-301b588a3d30", "node_type": "1", "metadata": {"window": "Using extensive data augmentation,\nthe dataset was scaled up by a factor of 32, and a ResNet-50 [21] model with pretrained weights\nachieved high accuracy on the validation set1. However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. 
The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. ", "original_text": "The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). "}, "hash": "f2713c5ea5269288b1e6d1a35f8c97b60f2b1ca3a6122921de67c98fa9df8a79", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5c78eade-4392-4784-adec-d0ca1263fab8", "node_type": "1", "metadata": {"window": "One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n", "original_text": "Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. "}, "hash": "1bc3e918a3c6c5d95df4beb146b44e6a03a3c75f8a77a7303967b0cf950f1807", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. ", "mimetype": "text/plain", "start_char_idx": 9268, "end_char_idx": 9447, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5c78eade-4392-4784-adec-d0ca1263fab8": {"__data__": {"id_": "5c78eade-4392-4784-adec-d0ca1263fab8", "embedding": null, "metadata": {"window": "One reason was the presence of insect species outside the training dataset. 
A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n", "original_text": "Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f46ef4dc-a8ad-4996-b30a-a4d1437349a1", "node_type": "1", "metadata": {"window": "However, the performance of the model after\ndeployment dropped, for multiple reasons.\n One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. ", "original_text": "Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. 
"}, "hash": "799093ea4e045154e8d9946844203d1dd9671c5457c356850ecf70df100b392a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5572979b-2863-44cd-870d-45d52c262bf5", "node_type": "1", "metadata": {"window": "A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n", "original_text": "Both classifiers are trained on GBIF data.\n"}, "hash": "eb454b3b004644df699c7b8514b617821b0772f65668c7345f7dbd3b633287c1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. ", "mimetype": "text/plain", "start_char_idx": 9447, "end_char_idx": 9618, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5572979b-2863-44cd-870d-45d52c262bf5": {"__data__": {"id_": "5572979b-2863-44cd-870d-45d52c262bf5", "embedding": null, "metadata": {"window": "A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. 
The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n", "original_text": "Both classifiers are trained on GBIF data.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5c78eade-4392-4784-adec-d0ca1263fab8", "node_type": "1", "metadata": {"window": "One reason was the presence of insect species outside the training dataset. A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n", "original_text": "Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. "}, "hash": "1bc3e918a3c6c5d95df4beb146b44e6a03a3c75f8a77a7303967b0cf950f1807", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6b24f08f-6ccd-44e4-92cb-f1b3d01e92bc", "node_type": "1", "metadata": {"window": "The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. 
The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. ", "original_text": "Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. "}, "hash": "e0c208e58235c65a19cd78ee2a656bdd276c87f58d4e6ed52d750e798c7404cc", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Both classifiers are trained on GBIF data.\n", "mimetype": "text/plain", "start_char_idx": 9618, "end_char_idx": 9661, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6b24f08f-6ccd-44e4-92cb-f1b3d01e92bc": {"__data__": {"id_": "6b24f08f-6ccd-44e4-92cb-f1b3d01e92bc", "embedding": null, "metadata": {"window": "The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. ", "original_text": "Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5572979b-2863-44cd-870d-45d52c262bf5", "node_type": "1", "metadata": {"window": "A training dataset\nincluding all possible species in the region would be necessary, but given the huge diversity\nof insects, it would be impossible for a small team of researchers to build such a dataset from\nscratch. The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. 
Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n", "original_text": "Both classifiers are trained on GBIF data.\n"}, "hash": "eb454b3b004644df699c7b8514b617821b0772f65668c7345f7dbd3b633287c1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ca18ce33-4362-44d5-a406-6c27a0f7861a", "node_type": "1", "metadata": {"window": "Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n", "original_text": "The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. "}, "hash": "a053bc5c8a23b1157927f1e16c8a094baa57295676434e2dc8c7563322fd4afa", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. ", "mimetype": "text/plain", "start_char_idx": 9661, "end_char_idx": 9783, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ca18ce33-4362-44d5-a406-6c27a0f7861a": {"__data__": {"id_": "ca18ce33-4362-44d5-a406-6c27a0f7861a", "embedding": null, "metadata": {"window": "Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. 
Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n", "original_text": "The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6b24f08f-6ccd-44e4-92cb-f1b3d01e92bc", "node_type": "1", "metadata": {"window": "The solution proposed by the team at Mila is to use images from iNaturalist and Obser-\nvation.org, accessible through the Global Biodiversity Information Facility (GBIF). Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. ", "original_text": "Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. "}, "hash": "e0c208e58235c65a19cd78ee2a656bdd276c87f58d4e6ed52d750e798c7404cc", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5e75e3d8-1d45-4315-8837-3293744b4f73", "node_type": "1", "metadata": {"window": "Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. 
The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. ", "original_text": "The new pipeline is\nsummarized in Fig.1.2.\n"}, "hash": "b2d222530b02405ab40841bd4c030b2cbd8aa4668518b1dbe5a381eecfb75c96", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. ", "mimetype": "text/plain", "start_char_idx": 9783, "end_char_idx": 10113, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5e75e3d8-1d45-4315-8837-3293744b4f73": {"__data__": {"id_": "5e75e3d8-1d45-4315-8837-3293744b4f73", "embedding": null, "metadata": {"window": "Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. ", "original_text": "The new pipeline is\nsummarized in Fig.1.2.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ca18ce33-4362-44d5-a406-6c27a0f7861a", "node_type": "1", "metadata": {"window": "Given a list\nof moth species (typically multiple thousands) relevant in a certain region, datasets of hundred\nthousand labelled images can be created and used to train the model. Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. 
Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n", "original_text": "The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. "}, "hash": "a053bc5c8a23b1157927f1e16c8a094baa57295676434e2dc8c7563322fd4afa", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "30455866-ee22-48b1-a060-d2ba0f715c2d", "node_type": "1", "metadata": {"window": "Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. ", "original_text": "Figure 1.2: Current pipeline.\n"}, "hash": "56335e12178fb38853df2d0cff52b662fe2eeb41263dce2e9418db30140fa40b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The new pipeline is\nsummarized in Fig.1.2.\n", "mimetype": "text/plain", "start_char_idx": 10113, "end_char_idx": 10156, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "30455866-ee22-48b1-a060-d2ba0f715c2d": {"__data__": {"id_": "30455866-ee22-48b1-a060-d2ba0f715c2d", "embedding": null, "metadata": {"window": "Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. 
The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. ", "original_text": "Figure 1.2: Current pipeline.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5e75e3d8-1d45-4315-8837-3293744b4f73", "node_type": "1", "metadata": {"window": "Furthermore, the clas-\nsification task was decomposed in two sub-tasks: first, moth/non-moth binary classification;\nsecond, fine-grained species classification for moths. Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. ", "original_text": "The new pipeline is\nsummarized in Fig.1.2.\n"}, "hash": "b2d222530b02405ab40841bd4c030b2cbd8aa4668518b1dbe5a381eecfb75c96", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5314ffab-e934-4121-95ff-339857a4d727", "node_type": "1", "metadata": {"window": "Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. 
With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. ", "original_text": "1There was perhaps a methodological error in the model evaluation. "}, "hash": "c70db078e1bb96690e52629cc076a6d2ad07ff13a3cad6f27b84ece044a917a9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Figure 1.2: Current pipeline.\n", "mimetype": "text/plain", "start_char_idx": 10156, "end_char_idx": 10186, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5314ffab-e934-4121-95ff-339857a4d727": {"__data__": {"id_": "5314ffab-e934-4121-95ff-339857a4d727", "embedding": null, "metadata": {"window": "Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. ", "original_text": "1There was perhaps a methodological error in the model evaluation. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "30455866-ee22-48b1-a060-d2ba0f715c2d", "node_type": "1", "metadata": {"window": "Both classifiers are trained on GBIF data.\n Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. 
With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. ", "original_text": "Figure 1.2: Current pipeline.\n"}, "hash": "56335e12178fb38853df2d0cff52b662fe2eeb41263dce2e9418db30140fa40b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cd61513d-e844-47cb-8fca-0d6b4863cc35", "node_type": "1", "metadata": {"window": "The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. ", "original_text": "To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n"}, "hash": "123cca325630f890a327b6e37d7b32f81ae494dc64a71b087293427bf45913ea", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "1There was perhaps a methodological error in the model evaluation. ", "mimetype": "text/plain", "start_char_idx": 10186, "end_char_idx": 10253, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cd61513d-e844-47cb-8fca-0d6b4863cc35": {"__data__": {"id_": "cd61513d-e844-47cb-8fca-0d6b4863cc35", "embedding": null, "metadata": {"window": "The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. 
While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. ", "original_text": "To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5314ffab-e934-4121-95ff-339857a4d727", "node_type": "1", "metadata": {"window": "Other significant sources of errors in the system described in [18] were the object detection and\nthe tracking algorithm. The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. ", "original_text": "1There was perhaps a methodological error in the model evaluation. "}, "hash": "c70db078e1bb96690e52629cc076a6d2ad07ff13a3cad6f27b84ece044a917a9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a3e31b32-a8d2-46f0-85c1-030e263106b4", "node_type": "1", "metadata": {"window": "The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n", "original_text": "4\n\nFrom these exciting innovations arise new exciting challenges. 
"}, "hash": "dc578b4612921654eca24103f7c285e7b0680c94b1b8878329c0e25c5b64b5db", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n", "mimetype": "text/plain", "start_char_idx": 10253, "end_char_idx": 10393, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a3e31b32-a8d2-46f0-85c1-030e263106b4": {"__data__": {"id_": "a3e31b32-a8d2-46f0-85c1-030e263106b4", "embedding": null, "metadata": {"window": "The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n", "original_text": "4\n\nFrom these exciting innovations arise new exciting challenges. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cd61513d-e844-47cb-8fca-0d6b4863cc35", "node_type": "1", "metadata": {"window": "The team at Mila improved both these steps: first, the classical com-\nputer vision techniques were replaced by a deep learning model for object detection; second,\nthe tracking algorithm was enhanced by introducing the similarity in the classifier\u2019s feature\nspace to the similarity metric between detections in consecutive images. The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. 
", "original_text": "To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n"}, "hash": "123cca325630f890a327b6e37d7b32f81ae494dc64a71b087293427bf45913ea", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "98209a41-983e-4d93-bdd7-7b6fe9ace351", "node_type": "1", "metadata": {"window": "Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. ", "original_text": "With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. "}, "hash": "6c4b9e9e6e3c111b900c2cd6f2cd70faf798072c5eeee1c78e8603fd8a090add", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "4\n\nFrom these exciting innovations arise new exciting challenges. ", "mimetype": "text/plain", "start_char_idx": 10393, "end_char_idx": 10459, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "98209a41-983e-4d93-bdd7-7b6fe9ace351": {"__data__": {"id_": "98209a41-983e-4d93-bdd7-7b6fe9ace351", "embedding": null, "metadata": {"window": "Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. 
", "original_text": "With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a3e31b32-a8d2-46f0-85c1-030e263106b4", "node_type": "1", "metadata": {"window": "The new pipeline is\nsummarized in Fig.1.2.\n Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n", "original_text": "4\n\nFrom these exciting innovations arise new exciting challenges. "}, "hash": "dc578b4612921654eca24103f7c285e7b0680c94b1b8878329c0e25c5b64b5db", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "effe17c5-6c45-4886-9061-8deb8777eae7", "node_type": "1", "metadata": {"window": "1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. ", "original_text": "The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. 
"}, "hash": "d912e281b27207efe24e9223cd3b31763e54126623726b447d504a8e3116858c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. ", "mimetype": "text/plain", "start_char_idx": 10459, "end_char_idx": 10665, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "effe17c5-6c45-4886-9061-8deb8777eae7": {"__data__": {"id_": "effe17c5-6c45-4886-9061-8deb8777eae7", "embedding": null, "metadata": {"window": "1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. ", "original_text": "The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "98209a41-983e-4d93-bdd7-7b6fe9ace351", "node_type": "1", "metadata": {"window": "Figure 1.2: Current pipeline.\n 1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. 
The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. ", "original_text": "With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. "}, "hash": "6c4b9e9e6e3c111b900c2cd6f2cd70faf798072c5eeee1c78e8603fd8a090add", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "540a4284-abfb-469d-adf7-efdf2de6e063", "node_type": "1", "metadata": {"window": "To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. ", "original_text": "While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. "}, "hash": "267652289d696e8fda4b753f64b7c09e7cc59d1178b10b9cb40df8d92e773388", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. ", "mimetype": "text/plain", "start_char_idx": 10665, "end_char_idx": 10784, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "540a4284-abfb-469d-adf7-efdf2de6e063": {"__data__": {"id_": "540a4284-abfb-469d-adf7-efdf2de6e063", "embedding": null, "metadata": {"window": "To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. 
While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. ", "original_text": "While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "effe17c5-6c45-4886-9061-8deb8777eae7", "node_type": "1", "metadata": {"window": "1There was perhaps a methodological error in the model evaluation. To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. ", "original_text": "The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. "}, "hash": "d912e281b27207efe24e9223cd3b31763e54126623726b447d504a8e3116858c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "be0ca02e-4045-4bc9-8a40-86124675bb1b", "node_type": "1", "metadata": {"window": "4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. 
The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n", "original_text": "The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n"}, "hash": "c4e36a6e5bd6301ea21091682fbe22de3e6dd46de39e271fe5ac6c57f32ccbb4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. ", "mimetype": "text/plain", "start_char_idx": 10784, "end_char_idx": 10978, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "be0ca02e-4045-4bc9-8a40-86124675bb1b": {"__data__": {"id_": "be0ca02e-4045-4bc9-8a40-86124675bb1b", "embedding": null, "metadata": {"window": "4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n", "original_text": "The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "540a4284-abfb-469d-adf7-efdf2de6e063", "node_type": "1", "metadata": {"window": "To capture the model performance on new\ndata, the split between training and validation datasets should have been done before augmentation.\n 4\n\nFrom these exciting innovations arise new exciting challenges. 
With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. ", "original_text": "While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. "}, "hash": "267652289d696e8fda4b753f64b7c09e7cc59d1178b10b9cb40df8d92e773388", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9ae8827d-c5d5-403f-ac27-989575a368d0", "node_type": "1", "metadata": {"window": "With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n", "original_text": "5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. 
"}, "hash": "dcfc4303f3eb3af48b3c8c9307bdd98957fe37416a1be2567edff8e3a3544c9a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n", "mimetype": "text/plain", "start_char_idx": 10978, "end_char_idx": 11095, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9ae8827d-c5d5-403f-ac27-989575a368d0": {"__data__": {"id_": "9ae8827d-c5d5-403f-ac27-989575a368d0", "embedding": null, "metadata": {"window": "With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n", "original_text": "5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "be0ca02e-4045-4bc9-8a40-86124675bb1b", "node_type": "1", "metadata": {"window": "4\n\nFrom these exciting innovations arise new exciting challenges. With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. 
Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n", "original_text": "The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n"}, "hash": "c4e36a6e5bd6301ea21091682fbe22de3e6dd46de39e271fe5ac6c57f32ccbb4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "17b70296-1704-4418-be14-d803ce2ecc58", "node_type": "1", "metadata": {"window": "The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. ", "original_text": "For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. "}, "hash": "f3d195710f771ec4f4821c7fbea012e7deeb2a33590f4243adc6f11acb1e0b79", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. ", "mimetype": "text/plain", "start_char_idx": 11095, "end_char_idx": 11213, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "17b70296-1704-4418-be14-d803ce2ecc58": {"__data__": {"id_": "17b70296-1704-4418-be14-d803ce2ecc58", "embedding": null, "metadata": {"window": "The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. 
To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. ", "original_text": "For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9ae8827d-c5d5-403f-ac27-989575a368d0", "node_type": "1", "metadata": {"window": "With an inference time of more\nthan 3 seconds on CPU, the object detector is too slow, and its accuracy can still largely be\nimproved; the work to address these shortcomings will be presented in chapter 2. The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n", "original_text": "5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. "}, "hash": "dcfc4303f3eb3af48b3c8c9307bdd98957fe37416a1be2567edff8e3a3544c9a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1ba67c51-e33b-42d7-99eb-d7de474219b2", "node_type": "1", "metadata": {"window": "While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. 
Its goal is to localize insects. ", "original_text": "Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. "}, "hash": "8383d16e4beb7dce10e6b41309756ba71f90c1be180b0e3d94ddfedeb7a773e6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. ", "mimetype": "text/plain", "start_char_idx": 11213, "end_char_idx": 11357, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1ba67c51-e33b-42d7-99eb-d7de474219b2": {"__data__": {"id_": "1ba67c51-e33b-42d7-99eb-d7de474219b2", "embedding": null, "metadata": {"window": "While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. ", "original_text": "Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "17b70296-1704-4418-be14-d803ce2ecc58", "node_type": "1", "metadata": {"window": "The new\nclassifier is affected by the domain shift between the GBIF training data and the target data\nfrom moth traps. While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. 
To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. ", "original_text": "For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. "}, "hash": "f3d195710f771ec4f4821c7fbea012e7deeb2a33590f4243adc6f11acb1e0b79", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7ff32c28-a48e-4e1b-9b6d-608f353fef98", "node_type": "1", "metadata": {"window": "The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. ", "original_text": "To keep the costs down, the deployments would also not be equipped with GPUs.\n"}, "hash": "8667aa410145d1d83f226976778cb2203a4dda4b2f89818fb159113ec5a9033f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. ", "mimetype": "text/plain", "start_char_idx": 11357, "end_char_idx": 11509, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7ff32c28-a48e-4e1b-9b6d-608f353fef98": {"__data__": {"id_": "7ff32c28-a48e-4e1b-9b6d-608f353fef98", "embedding": null, "metadata": {"window": "The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. 
It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. ", "original_text": "To keep the costs down, the deployments would also not be equipped with GPUs.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1ba67c51-e33b-42d7-99eb-d7de474219b2", "node_type": "1", "metadata": {"window": "While strong data augmentation operations have mitigated the problem,\neven better results are expected if the GBIF training datasets can be enhanced with manually\nlabeled images from the traps. The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. ", "original_text": "Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. "}, "hash": "8383d16e4beb7dce10e6b41309756ba71f90c1be180b0e3d94ddfedeb7a773e6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8581e29b-16dc-4f42-bc24-47cb29731d88", "node_type": "1", "metadata": {"window": "5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. 
My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n", "original_text": "For these two reasons, increasing the object detector speed was very attractive.\n"}, "hash": "5599af10bbd80c2d888d3b2c8f0f0ac3bfd02097dae7249ff18c6cb62ee27c33", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To keep the costs down, the deployments would also not be equipped with GPUs.\n", "mimetype": "text/plain", "start_char_idx": 11509, "end_char_idx": 11587, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8581e29b-16dc-4f42-bc24-47cb29731d88": {"__data__": {"id_": "8581e29b-16dc-4f42-bc24-47cb29731d88", "embedding": null, "metadata": {"window": "5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n", "original_text": "For these two reasons, increasing the object detector speed was very attractive.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7ff32c28-a48e-4e1b-9b6d-608f353fef98", "node_type": "1", "metadata": {"window": "The implementation of active learning techniques to make the\nmost of the manual work will be discussed in chapter 3.\n 5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. 
", "original_text": "To keep the costs down, the deployments would also not be equipped with GPUs.\n"}, "hash": "8667aa410145d1d83f226976778cb2203a4dda4b2f89818fb159113ec5a9033f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6a3d36bb-ce49-48e0-bc98-77b76c8ac681", "node_type": "1", "metadata": {"window": "For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. ", "original_text": "The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. "}, "hash": "e9dc08721ef6bc294fe360693be3caa99ae7044afaaa648303bf21af9fda704e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "For these two reasons, increasing the object detector speed was very attractive.\n", "mimetype": "text/plain", "start_char_idx": 11587, "end_char_idx": 11668, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6a3d36bb-ce49-48e0-bc98-77b76c8ac681": {"__data__": {"id_": "6a3d36bb-ce49-48e0-bc98-77b76c8ac681", "embedding": null, "metadata": {"window": "For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. ", "original_text": "The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8581e29b-16dc-4f42-bc24-47cb29731d88", "node_type": "1", "metadata": {"window": "5\n\nChapter 2\nObject Detection\nWhen I joined the team in May, the speed of the object detector was a point of concern. For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n", "original_text": "For these two reasons, increasing the object detector speed was very attractive.\n"}, "hash": "5599af10bbd80c2d888d3b2c8f0f0ac3bfd02097dae7249ff18c6cb62ee27c33", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "25055c3c-e8cf-42ae-a948-5953dc4b97ba", "node_type": "1", "metadata": {"window": "Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. ", "original_text": "Its goal is to localize insects. "}, "hash": "1ad9a0d128dc02eacd4377528724bf059bc9cb0e6f2fbafa288253ad94c4fcbd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. 
", "mimetype": "text/plain", "start_char_idx": 11668, "end_char_idx": 11798, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "25055c3c-e8cf-42ae-a948-5953dc4b97ba": {"__data__": {"id_": "25055c3c-e8cf-42ae-a948-5953dc4b97ba", "embedding": null, "metadata": {"window": "Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. ", "original_text": "Its goal is to localize insects. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6a3d36bb-ce49-48e0-bc98-77b76c8ac681", "node_type": "1", "metadata": {"window": "For\nthe collaborators and users of the project without GPUs, the model\u2019s inference time of more\nthan three seconds on CPU was a big limitation. Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. ", "original_text": "The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. "}, "hash": "e9dc08721ef6bc294fe360693be3caa99ae7044afaaa648303bf21af9fda704e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2d80ce78-54b9-4216-9c4c-29a5c55a42ec", "node_type": "1", "metadata": {"window": "To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. 
It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. ", "original_text": "It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. "}, "hash": "125229bbb2f6d2643c532f117f8b65abf8a5febdadd3f479f6c68e4562fffe0c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Its goal is to localize insects. ", "mimetype": "text/plain", "start_char_idx": 11798, "end_char_idx": 11831, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2d80ce78-54b9-4216-9c4c-29a5c55a42ec": {"__data__": {"id_": "2d80ce78-54b9-4216-9c4c-29a5c55a42ec", "embedding": null, "metadata": {"window": "To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. ", "original_text": "It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "25055c3c-e8cf-42ae-a948-5953dc4b97ba", "node_type": "1", "metadata": {"window": "Additionally, there was an idea to transfer\nthe object detection step of the pipeline to the deployments, in order to reduce the amount of\nstored data. To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. 
It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. ", "original_text": "Its goal is to localize insects. "}, "hash": "1ad9a0d128dc02eacd4377528724bf059bc9cb0e6f2fbafa288253ad94c4fcbd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "55a0e6a4-1259-4dd6-aa9e-714e6dcff2c7", "node_type": "1", "metadata": {"window": "For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n", "original_text": "My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n"}, "hash": "4552a35bb5e38c366067cc1099b3572085b0f4cbd48d3ed2edcd8c392aa727b5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. ", "mimetype": "text/plain", "start_char_idx": 11831, "end_char_idx": 12017, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "55a0e6a4-1259-4dd6-aa9e-714e6dcff2c7": {"__data__": {"id_": "55a0e6a4-1259-4dd6-aa9e-714e6dcff2c7", "embedding": null, "metadata": {"window": "For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. 
As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n", "original_text": "My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2d80ce78-54b9-4216-9c4c-29a5c55a42ec", "node_type": "1", "metadata": {"window": "To keep the costs down, the deployments would also not be equipped with GPUs.\n For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. ", "original_text": "It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. "}, "hash": "125229bbb2f6d2643c532f117f8b65abf8a5febdadd3f479f6c68e4562fffe0c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "09f02465-7111-4748-b6e8-73752d699022", "node_type": "1", "metadata": {"window": "The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. ", "original_text": "2.1. 
"}, "hash": "59d1dffe1d5363335bc8b5c711bfeafd55c3ad6d3098b31def1f1e86eb575560", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n", "mimetype": "text/plain", "start_char_idx": 12017, "end_char_idx": 12144, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "09f02465-7111-4748-b6e8-73752d699022": {"__data__": {"id_": "09f02465-7111-4748-b6e8-73752d699022", "embedding": null, "metadata": {"window": "The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. ", "original_text": "2.1. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "55a0e6a4-1259-4dd6-aa9e-714e6dcff2c7", "node_type": "1", "metadata": {"window": "For these two reasons, increasing the object detector speed was very attractive.\n The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n", "original_text": "My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n"}, "hash": "4552a35bb5e38c366067cc1099b3572085b0f4cbd48d3ed2edcd8c392aa727b5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "43570a47-e3b3-4d49-b7bc-8c9eb0132a0e", "node_type": "1", "metadata": {"window": "Its goal is to localize insects. 
It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. ", "original_text": "T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. "}, "hash": "189fd8b28f91a84ee94eba1fafd26a0dbd42a4c1190ac1ce94b44af392b7a1d7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "2.1. ", "mimetype": "text/plain", "start_char_idx": 12144, "end_char_idx": 12149, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "43570a47-e3b3-4d49-b7bc-8c9eb0132a0e": {"__data__": {"id_": "43570a47-e3b3-4d49-b7bc-8c9eb0132a0e", "embedding": null, "metadata": {"window": "Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. ", "original_text": "T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "09f02465-7111-4748-b6e8-73752d699022", "node_type": "1", "metadata": {"window": "The object detector was a Faster R-CNN with a ResNet-50-FPN backbone [22, 23], as imple-\nmented in PyTorch\u2019s torchvision package. Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. 
T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. ", "original_text": "2.1. "}, "hash": "59d1dffe1d5363335bc8b5c711bfeafd55c3ad6d3098b31def1f1e86eb575560", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d28de995-adf0-4861-9b60-eff1bd7dc124", "node_type": "1", "metadata": {"window": "It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. ", "original_text": "One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. "}, "hash": "fe052919b2ad5bca007902d62e0b514aab7662d545c92c7143831f57a81d13b4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. ", "mimetype": "text/plain", "start_char_idx": 12149, "end_char_idx": 12246, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d28de995-adf0-4861-9b60-eff1bd7dc124": {"__data__": {"id_": "d28de995-adf0-4861-9b60-eff1bd7dc124", "embedding": null, "metadata": {"window": "It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. 
", "original_text": "One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "43570a47-e3b3-4d49-b7bc-8c9eb0132a0e", "node_type": "1", "metadata": {"window": "Its goal is to localize insects. It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. ", "original_text": "T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. "}, "hash": "189fd8b28f91a84ee94eba1fafd26a0dbd42a4c1190ac1ce94b44af392b7a1d7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0404cb2f-970f-4d1c-85f7-74f1b5a9bb92", "node_type": "1", "metadata": {"window": "My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. ", "original_text": "As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n"}, "hash": "f2dfdbbe5a67d24ce398b6be9d209172f77924a19974ca6f82147507d2ac9f39", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. 
", "mimetype": "text/plain", "start_char_idx": 12246, "end_char_idx": 12413, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0404cb2f-970f-4d1c-85f7-74f1b5a9bb92": {"__data__": {"id_": "0404cb2f-970f-4d1c-85f7-74f1b5a9bb92", "embedding": null, "metadata": {"window": "My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. ", "original_text": "As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d28de995-adf0-4861-9b60-eff1bd7dc124", "node_type": "1", "metadata": {"window": "It was trained on a set\nof images on which bounding boxes could accurately be inferred with classical computer vi-\nsion techniques, due to the low density of moths and clean background. My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. ", "original_text": "One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. "}, "hash": "fe052919b2ad5bca007902d62e0b514aab7662d545c92c7143831f57a81d13b4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ad4f2620-64e3-4f0d-bdb9-f9754e4a87d4", "node_type": "1", "metadata": {"window": "2.1. 
T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. ", "original_text": "2.1.1. "}, "hash": "866b8dba809fcaaca315a6d3af56925eb42e89d537d1a0a03eaffe178349faec", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n", "mimetype": "text/plain", "start_char_idx": 12413, "end_char_idx": 12555, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ad4f2620-64e3-4f0d-bdb9-f9754e4a87d4": {"__data__": {"id_": "ad4f2620-64e3-4f0d-bdb9-f9754e4a87d4", "embedding": null, "metadata": {"window": "2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. ", "original_text": "2.1.1. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0404cb2f-970f-4d1c-85f7-74f1b5a9bb92", "node_type": "1", "metadata": {"window": "My efforts to improve\nthe model can be grouped around three main axes: the framework, the training data, and the\narchitecture.\n 2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. 
One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. ", "original_text": "As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n"}, "hash": "f2dfdbbe5a67d24ce398b6be9d209172f77924a19974ca6f82147507d2ac9f39", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a31e210e-d07f-4e17-96db-44873154d745", "node_type": "1", "metadata": {"window": "T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. ", "original_text": "Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. "}, "hash": "b5d28a3be0f0e7eea41c1d4229ad7b639f0419fed501946265096df531cc6427", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "2.1.1. ", "mimetype": "text/plain", "start_char_idx": 12555, "end_char_idx": 12562, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a31e210e-d07f-4e17-96db-44873154d745": {"__data__": {"id_": "a31e210e-d07f-4e17-96db-44873154d745", "embedding": null, "metadata": {"window": "T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. 
Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. ", "original_text": "Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ad4f2620-64e3-4f0d-bdb9-f9754e4a87d4", "node_type": "1", "metadata": {"window": "2.1. T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. ", "original_text": "2.1.1. "}, "hash": "866b8dba809fcaaca315a6d3af56925eb42e89d537d1a0a03eaffe178349faec", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "09239c16-5b95-4566-8027-af1115b90ffb", "node_type": "1", "metadata": {"window": "One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. 
The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. ", "original_text": "Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. "}, "hash": "2a14992363d218016cd4d91f93a64d877e428bb05d8f063f64ffcb82db5f2059", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. ", "mimetype": "text/plain", "start_char_idx": 12562, "end_char_idx": 12685, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "09239c16-5b95-4566-8027-af1115b90ffb": {"__data__": {"id_": "09239c16-5b95-4566-8027-af1115b90ffb", "embedding": null, "metadata": {"window": "One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. ", "original_text": "Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a31e210e-d07f-4e17-96db-44873154d745", "node_type": "1", "metadata": {"window": "T HE FRAMEWORK\nHaving a good framework is fundamental to allow efficient development of a model. One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. 
The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. ", "original_text": "Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. "}, "hash": "b5d28a3be0f0e7eea41c1d4229ad7b639f0419fed501946265096df531cc6427", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "57dd61ec-fd44-49ad-8777-e972d4ff2ee0", "node_type": "1", "metadata": {"window": "As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n", "original_text": "The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. "}, "hash": "9f738f6dc26292c9552f70c5fa300e63d2d2d3d59963320a3f300f6413fb0de8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. ", "mimetype": "text/plain", "start_char_idx": 12685, "end_char_idx": 12818, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "57dd61ec-fd44-49ad-8777-e972d4ff2ee0": {"__data__": {"id_": "57dd61ec-fd44-49ad-8777-e972d4ff2ee0", "embedding": null, "metadata": {"window": "As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. 
Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n", "original_text": "The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "09239c16-5b95-4566-8027-af1115b90ffb", "node_type": "1", "metadata": {"window": "One\nneeds to be able to easily (i) keep track of experiments and link checkpoints to training runs,\n(ii) integrate new functionalities, and (iii) evaluate the models. As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. ", "original_text": "Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. "}, "hash": "2a14992363d218016cd4d91f93a64d877e428bb05d8f063f64ffcb82db5f2059", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9d6d45c1-1b08-44cc-813d-3104c9d48101", "node_type": "1", "metadata": {"window": "2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. 
The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. ", "original_text": "Hence, a full refactor was needed. "}, "hash": "9ae009eaacf19ed2f6727901bfbf719a1bdc930f3c9aba00461f0b7827d855fc", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. ", "mimetype": "text/plain", "start_char_idx": 12818, "end_char_idx": 13093, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9d6d45c1-1b08-44cc-813d-3104c9d48101": {"__data__": {"id_": "9d6d45c1-1b08-44cc-813d-3104c9d48101", "embedding": null, "metadata": {"window": "2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. ", "original_text": "Hence, a full refactor was needed. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "57dd61ec-fd44-49ad-8777-e972d4ff2ee0", "node_type": "1", "metadata": {"window": "As the work on the classification\npart of the pipeline had taken most of the team\u2019s time, none of this was in place for the object\ndetection.\n 2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. 
The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n", "original_text": "The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. "}, "hash": "9f738f6dc26292c9552f70c5fa300e63d2d2d3d59963320a3f300f6413fb0de8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6c15d03f-d486-49eb-8f4a-eeb97cadb4c4", "node_type": "1", "metadata": {"window": "Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. ", "original_text": "The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. "}, "hash": "42a291a618ae4732fccf09ad2183509e2abbfb9fa1b34108016552c54a9c642e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Hence, a full refactor was needed. ", "mimetype": "text/plain", "start_char_idx": 13093, "end_char_idx": 13128, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6c15d03f-d486-49eb-8f4a-eeb97cadb4c4": {"__data__": {"id_": "6c15d03f-d486-49eb-8f4a-eeb97cadb4c4", "embedding": null, "metadata": {"window": "Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. 
The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. ", "original_text": "The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9d6d45c1-1b08-44cc-813d-3104c9d48101", "node_type": "1", "metadata": {"window": "2.1.1. Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. ", "original_text": "Hence, a full refactor was needed. "}, "hash": "9ae009eaacf19ed2f6727901bfbf719a1bdc930f3c9aba00461f0b7827d855fc", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "080c4498-a345-4c55-acf3-3ffc2b1d5347", "node_type": "1", "metadata": {"window": "Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. 
To this end, the popular Weights & Biases platform was\nadopted. ", "original_text": "The supported models are all those available in\ntorchvision. "}, "hash": "b42699445b7e4b6064f8cb58e6c3616f0cfc98bc630ad245bfedeae22b4b0bf0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. ", "mimetype": "text/plain", "start_char_idx": 13128, "end_char_idx": 13311, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "080c4498-a345-4c55-acf3-3ffc2b1d5347": {"__data__": {"id_": "080c4498-a345-4c55-acf3-3ffc2b1d5347", "embedding": null, "metadata": {"window": "Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. ", "original_text": "The supported models are all those available in\ntorchvision. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6c15d03f-d486-49eb-8f4a-eeb97cadb4c4", "node_type": "1", "metadata": {"window": "Code improvements and experiment tracking\nThe code was organized in a number of Jupyter Notebooks and some python modules. Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. 
The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. ", "original_text": "The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. "}, "hash": "42a291a618ae4732fccf09ad2183509e2abbfb9fa1b34108016552c54a9c642e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "76f1cdb4-f8dd-4964-957d-9f9b7f2e3ffa", "node_type": "1", "metadata": {"window": "The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n", "original_text": "The highest standards for code quality were held.\n"}, "hash": "87221c89b849197e3fcdfa9dbf2cebed9937088be71c1ef195c848d714531eaf", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The supported models are all those available in\ntorchvision. ", "mimetype": "text/plain", "start_char_idx": 13311, "end_char_idx": 13372, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "76f1cdb4-f8dd-4964-957d-9f9b7f2e3ffa": {"__data__": {"id_": "76f1cdb4-f8dd-4964-957d-9f9b7f2e3ffa", "embedding": null, "metadata": {"window": "The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. 
It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n", "original_text": "The highest standards for code quality were held.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "080c4498-a345-4c55-acf3-3ffc2b1d5347", "node_type": "1", "metadata": {"window": "Jupyter\nNotebooks are fine for early data analysis and exploration, but they certainly have little place in\na more advanced project. The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. ", "original_text": "The supported models are all those available in\ntorchvision. "}, "hash": "b42699445b7e4b6064f8cb58e6c3616f0cfc98bc630ad245bfedeae22b4b0bf0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b30ae036-584a-41c0-ab11-09b566a27014", "node_type": "1", "metadata": {"window": "Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. ", "original_text": "1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. 
"}, "hash": "f2a7b8203c10ddb77a15cf0df84c342a956416e7f3ecda3d701f39cff85702f2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The highest standards for code quality were held.\n", "mimetype": "text/plain", "start_char_idx": 13372, "end_char_idx": 13422, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b30ae036-584a-41c0-ab11-09b566a27014": {"__data__": {"id_": "b30ae036-584a-41c0-ab11-09b566a27014", "embedding": null, "metadata": {"window": "Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. ", "original_text": "1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "76f1cdb4-f8dd-4964-957d-9f9b7f2e3ffa", "node_type": "1", "metadata": {"window": "The non-linear workflow and the lack of IDE features (such as linting\nand code styling warnings) favor errors and bad coding practices; additionally, they are terri-\nble for code versioning1, all reasons that make it hard for teammates to collaborate and expand\nthe project. Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. 
The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n", "original_text": "The highest standards for code quality were held.\n"}, "hash": "87221c89b849197e3fcdfa9dbf2cebed9937088be71c1ef195c848d714531eaf", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8347e777-f555-484f-ba9b-687f777dce65", "node_type": "1", "metadata": {"window": "The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. ", "original_text": "It is important to be able to link each model to a training\nrun, with all its defining parameters. "}, "hash": "db50af63b686b98cbb6f6fb560138af5a7f6466c250de1cca22d2d0c59f97fd8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. ", "mimetype": "text/plain", "start_char_idx": 13422, "end_char_idx": 13643, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8347e777-f555-484f-ba9b-687f777dce65": {"__data__": {"id_": "8347e777-f555-484f-ba9b-687f777dce65", "embedding": null, "metadata": {"window": "The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. 
Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. ", "original_text": "It is important to be able to link each model to a training\nrun, with all its defining parameters. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b30ae036-584a-41c0-ab11-09b566a27014", "node_type": "1", "metadata": {"window": "Hence, a full refactor was needed. The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. ", "original_text": "1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. "}, "hash": "f2a7b8203c10ddb77a15cf0df84c342a956416e7f3ecda3d701f39cff85702f2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b07a199d-3d88-4932-8360-c436e7624a7b", "node_type": "1", "metadata": {"window": "The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. ", "original_text": "To this end, the popular Weights & Biases platform was\nadopted. 
"}, "hash": "f5eda4a66eb60b8529ef70087436c6abae5dfe813109944cdb3cd71cacfa5caf", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "It is important to be able to link each model to a training\nrun, with all its defining parameters. ", "mimetype": "text/plain", "start_char_idx": 13643, "end_char_idx": 13742, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b07a199d-3d88-4932-8360-c436e7624a7b": {"__data__": {"id_": "b07a199d-3d88-4932-8360-c436e7624a7b", "embedding": null, "metadata": {"window": "The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. ", "original_text": "To this end, the popular Weights & Biases platform was\nadopted. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8347e777-f555-484f-ba9b-687f777dce65", "node_type": "1", "metadata": {"window": "The result was a few modules for dataset dec-\nlarations and common functions definitions, and two command-line scripts, one for launching\ntrainings, and one for launching inferences. The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. ", "original_text": "It is important to be able to link each model to a training\nrun, with all its defining parameters. 
"}, "hash": "db50af63b686b98cbb6f6fb560138af5a7f6466c250de1cca22d2d0c59f97fd8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "18f2dcce-f902-43c9-8004-213e3ce18d2f", "node_type": "1", "metadata": {"window": "The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. ", "original_text": "The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n"}, "hash": "e62834830a4ceb17fe28d46b3d211ffeb7128acf75928202e6f2fc1aa928bd5b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To this end, the popular Weights & Biases platform was\nadopted. ", "mimetype": "text/plain", "start_char_idx": 13742, "end_char_idx": 13806, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "18f2dcce-f902-43c9-8004-213e3ce18d2f": {"__data__": {"id_": "18f2dcce-f902-43c9-8004-213e3ce18d2f", "embedding": null, "metadata": {"window": "The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. 
", "original_text": "The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b07a199d-3d88-4932-8360-c436e7624a7b", "node_type": "1", "metadata": {"window": "The supported models are all those available in\ntorchvision. The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. ", "original_text": "To this end, the popular Weights & Biases platform was\nadopted. "}, "hash": "f5eda4a66eb60b8529ef70087436c6abae5dfe813109944cdb3cd71cacfa5caf", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "20816ca8-bcbb-4b1a-be21-b5cf03d5375a", "node_type": "1", "metadata": {"window": "1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. ", "original_text": "2.1.2. 
"}, "hash": "75e8742e119888e5d57a7ce02003754e79e529dfe761e4462e7ba3ac4eb941a0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n", "mimetype": "text/plain", "start_char_idx": 13806, "end_char_idx": 14034, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "20816ca8-bcbb-4b1a-be21-b5cf03d5375a": {"__data__": {"id_": "20816ca8-bcbb-4b1a-be21-b5cf03d5375a", "embedding": null, "metadata": {"window": "1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. ", "original_text": "2.1.2. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "18f2dcce-f902-43c9-8004-213e3ce18d2f", "node_type": "1", "metadata": {"window": "The highest standards for code quality were held.\n 1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. 
To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. ", "original_text": "The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n"}, "hash": "e62834830a4ceb17fe28d46b3d211ffeb7128acf75928202e6f2fc1aa928bd5b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "97ab9268-d664-4cdb-bc6e-01a11f926629", "node_type": "1", "metadata": {"window": "It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n", "original_text": "Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. "}, "hash": "64f98d344159ef64225a7c4a984340623c1ea5705085c518cec62149be24ed0b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "2.1.2. ", "mimetype": "text/plain", "start_char_idx": 14034, "end_char_idx": 14041, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "97ab9268-d664-4cdb-bc6e-01a11f926629": {"__data__": {"id_": "97ab9268-d664-4cdb-bc6e-01a11f926629", "embedding": null, "metadata": {"window": "It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. 
It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n", "original_text": "Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "20816ca8-bcbb-4b1a-be21-b5cf03d5375a", "node_type": "1", "metadata": {"window": "1This was less of a concern early on, as the team was not using a common repository\n6\n\nDuring development, tens of trainings can be launched in short intervals of time, and the num-\nber of models quickly start to add up. It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. ", "original_text": "2.1.2. "}, "hash": "75e8742e119888e5d57a7ce02003754e79e529dfe761e4462e7ba3ac4eb941a0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7c0be403-436c-4820-9984-136e9b5d21c7", "node_type": "1", "metadata": {"window": "To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. 
It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). ", "original_text": "These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. "}, "hash": "abbfd43bc8b20971423fa2875b9dc2519c5a8dea10cb0a166a72ba06b751c0d3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. ", "mimetype": "text/plain", "start_char_idx": 14041, "end_char_idx": 14209, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7c0be403-436c-4820-9984-136e9b5d21c7": {"__data__": {"id_": "7c0be403-436c-4820-9984-136e9b5d21c7", "embedding": null, "metadata": {"window": "To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). ", "original_text": "These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "97ab9268-d664-4cdb-bc6e-01a11f926629", "node_type": "1", "metadata": {"window": "It is important to be able to link each model to a training\nrun, with all its defining parameters. To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n", "original_text": "Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. "}, "hash": "64f98d344159ef64225a7c4a984340623c1ea5705085c518cec62149be24ed0b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ce22a1e4-9c08-4287-94d3-2569b25c1850", "node_type": "1", "metadata": {"window": "The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. 
It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. ", "original_text": "To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. "}, "hash": "109a57904978953011bdb5f6545caf69f238f375615d71176f540e50b76f3f2e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. ", "mimetype": "text/plain", "start_char_idx": 14209, "end_char_idx": 14455, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ce22a1e4-9c08-4287-94d3-2569b25c1850": {"__data__": {"id_": "ce22a1e4-9c08-4287-94d3-2569b25c1850", "embedding": null, "metadata": {"window": "The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. ", "original_text": "To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7c0be403-436c-4820-9984-136e9b5d21c7", "node_type": "1", "metadata": {"window": "To this end, the popular Weights & Biases platform was\nadopted. The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). ", "original_text": "These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. "}, "hash": "abbfd43bc8b20971423fa2875b9dc2519c5a8dea10cb0a166a72ba06b751c0d3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "881f882a-c04e-4abd-b470-9608b6ce84f5", "node_type": "1", "metadata": {"window": "2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. 
It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n", "original_text": "A screenshot of\nthe GUI is displayed in Fig.2.1. "}, "hash": "1c4f6e40b84fc0d66f4d5add978bdaea8f438a503f0c4286b9adf67e74604c8f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. ", "mimetype": "text/plain", "start_char_idx": 14455, "end_char_idx": 14563, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "881f882a-c04e-4abd-b470-9608b6ce84f5": {"__data__": {"id_": "881f882a-c04e-4abd-b470-9608b6ce84f5", "embedding": null, "metadata": {"window": "2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n", "original_text": "A screenshot of\nthe GUI is displayed in Fig.2.1. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ce22a1e4-9c08-4287-94d3-2569b25c1850", "node_type": "1", "metadata": {"window": "The training configuration and the model weights are automatically uploaded to the\nplatform; the model checkpoint is also saved locally, under a name that includes the model\narchitecture and the training run ID assigned by W&B.\n 2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. ", "original_text": "To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. "}, "hash": "109a57904978953011bdb5f6545caf69f238f375615d71176f540e50b76f3f2e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "815b7478-91e6-48ee-ad73-5bcac53aa1da", "node_type": "1", "metadata": {"window": "Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. 
It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. ", "original_text": "It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n"}, "hash": "47e6a549559d36df6ed7b3363e2f0873c666d4b57423b78c82224863ea0dc07c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A screenshot of\nthe GUI is displayed in Fig.2.1. ", "mimetype": "text/plain", "start_char_idx": 14563, "end_char_idx": 14612, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "815b7478-91e6-48ee-ad73-5bcac53aa1da": {"__data__": {"id_": "815b7478-91e6-48ee-ad73-5bcac53aa1da", "embedding": null, "metadata": {"window": "Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. 
The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. ", "original_text": "It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "881f882a-c04e-4abd-b470-9608b6ce84f5", "node_type": "1", "metadata": {"window": "2.1.2. Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n", "original_text": "A screenshot of\nthe GUI is displayed in Fig.2.1. "}, "hash": "1c4f6e40b84fc0d66f4d5add978bdaea8f438a503f0c4286b9adf67e74604c8f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "da32abdb-18f3-48e9-9a4f-0532fd6cc95c", "node_type": "1", "metadata": {"window": "These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. 
It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. ", "original_text": "The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). "}, "hash": "6080aa849bda4ce1864ef06d74adb3673c33eacc05319f5a63b4449a18654e8e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n", "mimetype": "text/plain", "start_char_idx": 14612, "end_char_idx": 14874, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "da32abdb-18f3-48e9-9a4f-0532fd6cc95c": {"__data__": {"id_": "da32abdb-18f3-48e9-9a4f-0532fd6cc95c", "embedding": null, "metadata": {"window": "These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. 
It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. ", "original_text": "The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "815b7478-91e6-48ee-ad73-5bcac53aa1da", "node_type": "1", "metadata": {"window": "Model evaluation, metrics and threshold analysis\nVisual inspection\nThe only way to evaluate the model was to run inferences and visualize the predicted bounding\nboxes. These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. 
The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. ", "original_text": "It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n"}, "hash": "47e6a549559d36df6ed7b3363e2f0873c666d4b57423b78c82224863ea0dc07c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ef0d3042-1780-48a4-a7e2-68ac91d40ef9", "node_type": "1", "metadata": {"window": "To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. ", "original_text": "The user moves from one image to the other with the left and right arrow\nkeys. "}, "hash": "96765c81bc9ef27d7f16259e5b13674e03304ebe8189b1b915878ae8c97cf5b2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). 
", "mimetype": "text/plain", "start_char_idx": 14874, "end_char_idx": 15275, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ef0d3042-1780-48a4-a7e2-68ac91d40ef9": {"__data__": {"id_": "ef0d3042-1780-48a4-a7e2-68ac91d40ef9", "embedding": null, "metadata": {"window": "To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. ", "original_text": "The user moves from one image to the other with the left and right arrow\nkeys. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "da32abdb-18f3-48e9-9a4f-0532fd6cc95c", "node_type": "1", "metadata": {"window": "These were saved directly on copies of the images, which presents two major inconve-\nniences: (i) it is very inefficient in terms of memory, and (ii) it makes it very unpractical to\nvisualize the model performance as a function of the threshold. To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). 
The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. ", "original_text": "The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). "}, "hash": "6080aa849bda4ce1864ef06d74adb3673c33eacc05319f5a63b4449a18654e8e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9bf70dab-412b-4c9c-b211-86e70caab207", "node_type": "1", "metadata": {"window": "A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. ", "original_text": "The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n"}, "hash": "51ee27cfbd8f8e541139aaee88d328d85e7cd4278d5aebca844a211bcc27c586", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The user moves from one image to the other with the left and right arrow\nkeys. ", "mimetype": "text/plain", "start_char_idx": 15275, "end_char_idx": 15354, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9bf70dab-412b-4c9c-b211-86e70caab207": {"__data__": {"id_": "9bf70dab-412b-4c9c-b211-86e70caab207", "embedding": null, "metadata": {"window": "A screenshot of\nthe GUI is displayed in Fig.2.1. 
It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. ", "original_text": "The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ef0d3042-1780-48a4-a7e2-68ac91d40ef9", "node_type": "1", "metadata": {"window": "To address these issues, a sim-\nple python GUI application was created, using the standard tkinter package. A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. 
With currently ongoing annotation efforts, the test sets will soon finally be available. ", "original_text": "The user moves from one image to the other with the left and right arrow\nkeys. "}, "hash": "96765c81bc9ef27d7f16259e5b13674e03304ebe8189b1b915878ae8c97cf5b2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "653c70f6-f074-4140-b88e-3cf8998f75a2", "node_type": "1", "metadata": {"window": "It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. ", "original_text": "Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. "}, "hash": "6a9e127fe40364b7bf3dd6c76b49bcc7792b9ccddb78ab95f94369bc021183ef", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n", "mimetype": "text/plain", "start_char_idx": 15354, "end_char_idx": 15473, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "653c70f6-f074-4140-b88e-3cf8998f75a2": {"__data__": {"id_": "653c70f6-f074-4140-b88e-3cf8998f75a2", "embedding": null, "metadata": {"window": "It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). 
The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. ", "original_text": "Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9bf70dab-412b-4c9c-b211-86e70caab207", "node_type": "1", "metadata": {"window": "A screenshot of\nthe GUI is displayed in Fig.2.1. It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. 
", "original_text": "The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n"}, "hash": "51ee27cfbd8f8e541139aaee88d328d85e7cd4278d5aebca844a211bcc27c586", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f226afe7-e029-4a9f-b1c9-454abe410d7b", "node_type": "1", "metadata": {"window": "The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. ", "original_text": "In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. "}, "hash": "c8a257df62de322a00d808bf858f356ae20350e4e20f39a4a33a5b4835d59e5a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. ", "mimetype": "text/plain", "start_char_idx": 15473, "end_char_idx": 15708, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f226afe7-e029-4a9f-b1c9-454abe410d7b": {"__data__": {"id_": "f226afe7-e029-4a9f-b1c9-454abe410d7b", "embedding": null, "metadata": {"window": "The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. 
The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. ", "original_text": "In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "653c70f6-f074-4140-b88e-3cf8998f75a2", "node_type": "1", "metadata": {"window": "It is started from command line by running the python script,\nwith two inputs: the path to the json file containing the model\u2019s predictions (which is outputted\nby the inference script), and, optionally, if they are in a different folder, the path to the images.\n The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. ", "original_text": "Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. 
"}, "hash": "6a9e127fe40364b7bf3dd6c76b49bcc7792b9ccddb78ab95f94369bc021183ef", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6d2c585b-1fbd-4787-942e-8eb3c9d8b979", "node_type": "1", "metadata": {"window": "The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. ", "original_text": "With currently ongoing annotation efforts, the test sets will soon finally be available. "}, "hash": "c97934cb16a56f6a462ba9ccf85f15e9e5548e5a5edf571547bb1c8d2770e946", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. ", "mimetype": "text/plain", "start_char_idx": 15708, "end_char_idx": 15864, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6d2c585b-1fbd-4787-942e-8eb3c9d8b979": {"__data__": {"id_": "6d2c585b-1fbd-4787-942e-8eb3c9d8b979", "embedding": null, "metadata": {"window": "The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. ", "original_text": "With currently ongoing annotation efforts, the test sets will soon finally be available. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f226afe7-e029-4a9f-b1c9-454abe410d7b", "node_type": "1", "metadata": {"window": "The window presents: (i) a slider to modify the threshold (bounding boxes will appear and\ndisappear accordingly); (ii) the path to the json file (a useful reminder when the app is run in\nparallel to compare models); (iii) the image filename; (iv) a counter indicating the current im-\nage number out of the total; (v) and an entry to modify the step (useful to quickly move across\nhundreds of images). The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. ", "original_text": "In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. "}, "hash": "c8a257df62de322a00d808bf858f356ae20350e4e20f39a4a33a5b4835d59e5a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "585507d7-11dd-4785-9536-1d503cc157d9", "node_type": "1", "metadata": {"window": "The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). 
", "original_text": "In\nanticipation of that, I developed the relevant metrics. "}, "hash": "61e539988d855eba7c657290843d3f8a08a7bcdc1f31a38876b1c53c9930340a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "With currently ongoing annotation efforts, the test sets will soon finally be available. ", "mimetype": "text/plain", "start_char_idx": 15864, "end_char_idx": 15953, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "585507d7-11dd-4785-9536-1d503cc157d9": {"__data__": {"id_": "585507d7-11dd-4785-9536-1d503cc157d9", "embedding": null, "metadata": {"window": "The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). ", "original_text": "In\nanticipation of that, I developed the relevant metrics. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6d2c585b-1fbd-4787-942e-8eb3c9d8b979", "node_type": "1", "metadata": {"window": "The user moves from one image to the other with the left and right arrow\nkeys. The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. 
The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. ", "original_text": "With currently ongoing annotation efforts, the test sets will soon finally be available. "}, "hash": "c97934cb16a56f6a462ba9ccf85f15e9e5548e5a5edf571547bb1c8d2770e946", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fbc8a8ff-692e-48fc-afad-81ec2e7777e2", "node_type": "1", "metadata": {"window": "Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. ", "original_text": "The mean average precision (mAP) is the\nstandard metric for object detectors. "}, "hash": "9171f07400338d554da9848e5a024816277a183b02446f50652e816338c931c0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In\nanticipation of that, I developed the relevant metrics. ", "mimetype": "text/plain", "start_char_idx": 15953, "end_char_idx": 16012, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fbc8a8ff-692e-48fc-afad-81ec2e7777e2": {"__data__": {"id_": "fbc8a8ff-692e-48fc-afad-81ec2e7777e2", "embedding": null, "metadata": {"window": "Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). 
For each IoU\nthreshold, one PR curve and one AP measure is obtained. ", "original_text": "The mean average precision (mAP) is the\nstandard metric for object detectors. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "585507d7-11dd-4785-9536-1d503cc157d9", "node_type": "1", "metadata": {"window": "The GUI also accepts ground truths instead of model predictions, in which case the score\nthreshold won\u2019t be displayed.\n Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). ", "original_text": "In\nanticipation of that, I developed the relevant metrics. "}, "hash": "61e539988d855eba7c657290843d3f8a08a7bcdc1f31a38876b1c53c9930340a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "337fcab1-fcb4-4a29-9db8-82b36a72ecfb", "node_type": "1", "metadata": {"window": "In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. ", "original_text": "In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. 
"}, "hash": "e1eadcb90881c7d091a53c51a080d5b73f7cdf1e3af3e23e3a8091ee0aca14c5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The mean average precision (mAP) is the\nstandard metric for object detectors. ", "mimetype": "text/plain", "start_char_idx": 16012, "end_char_idx": 16090, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "337fcab1-fcb4-4a29-9db8-82b36a72ecfb": {"__data__": {"id_": "337fcab1-fcb4-4a29-9db8-82b36a72ecfb", "embedding": null, "metadata": {"window": "In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. ", "original_text": "In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fbc8a8ff-692e-48fc-afad-81ec2e7777e2", "node_type": "1", "metadata": {"window": "Figure 2.1: Screenshot of the GUI app to inspect models\u2019 predictions\n7\n\nMean Average Precision\nThe GUI is a nice tool to inspect the model\u2019s predictions, however it is still a far cry from hav-\ning actual testing datasets and metrics. In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). 
For each IoU\nthreshold, one PR curve and one AP measure is obtained. ", "original_text": "The mean average precision (mAP) is the\nstandard metric for object detectors. "}, "hash": "9171f07400338d554da9848e5a024816277a183b02446f50652e816338c931c0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "262c1bee-ab30-4056-8a25-480f8304420e", "node_type": "1", "metadata": {"window": "With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. ", "original_text": "The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. "}, "hash": "000845cd0166b5c21665668ae0b8fc1345c551b2b21f545a189ff038f23934be", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. ", "mimetype": "text/plain", "start_char_idx": 16090, "end_char_idx": 16262, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "262c1bee-ab30-4056-8a25-480f8304420e": {"__data__": {"id_": "262c1bee-ab30-4056-8a25-480f8304420e", "embedding": null, "metadata": {"window": "With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. ", "original_text": "The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "337fcab1-fcb4-4a29-9db8-82b36a72ecfb", "node_type": "1", "metadata": {"window": "In fact, visual inspection is both time-consuming and\nlimited in its ability to measure small improvements, while also being affected by confirmation\nbias. With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. ", "original_text": "In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. "}, "hash": "e1eadcb90881c7d091a53c51a080d5b73f7cdf1e3af3e23e3a8091ee0aca14c5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2e55fa52-a34c-4427-b497-a98c074589af", "node_type": "1", "metadata": {"window": "In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n", "original_text": "In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). 
"}, "hash": "78677f120bb5a274f0fbe40367b64d3dec9e362809d8289a16742132daeccc3f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. ", "mimetype": "text/plain", "start_char_idx": 16262, "end_char_idx": 16367, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2e55fa52-a34c-4427-b497-a98c074589af": {"__data__": {"id_": "2e55fa52-a34c-4427-b497-a98c074589af", "embedding": null, "metadata": {"window": "In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n", "original_text": "In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "262c1bee-ab30-4056-8a25-480f8304420e", "node_type": "1", "metadata": {"window": "With currently ongoing annotation efforts, the test sets will soon finally be available. In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. 
", "original_text": "The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. "}, "hash": "000845cd0166b5c21665668ae0b8fc1345c551b2b21f545a189ff038f23934be", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fe71d38c-30df-4a7c-a675-873b589f5b1a", "node_type": "1", "metadata": {"window": "The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n", "original_text": "For each IoU\nthreshold, one PR curve and one AP measure is obtained. "}, "hash": "9bce028917e6c1fe2096e5b83db8f7c0eb976457adf56d12b9b0d745ad6774db", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). ", "mimetype": "text/plain", "start_char_idx": 16367, "end_char_idx": 16612, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fe71d38c-30df-4a7c-a675-873b589f5b1a": {"__data__": {"id_": "fe71d38c-30df-4a7c-a675-873b589f5b1a", "embedding": null, "metadata": {"window": "The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. 
During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n", "original_text": "For each IoU\nthreshold, one PR curve and one AP measure is obtained. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2e55fa52-a34c-4427-b497-a98c074589af", "node_type": "1", "metadata": {"window": "In\nanticipation of that, I developed the relevant metrics. The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n", "original_text": "In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). "}, "hash": "78677f120bb5a274f0fbe40367b64d3dec9e362809d8289a16742132daeccc3f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ff83caf1-b91c-4092-a387-039161a5ddbb", "node_type": "1", "metadata": {"window": "In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. 
During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. ", "original_text": "The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. "}, "hash": "6839c1cad36e4671a344ccbe6ae8ac64b8743a61f46ab0aa757cfeb61bb2363b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "For each IoU\nthreshold, one PR curve and one AP measure is obtained. ", "mimetype": "text/plain", "start_char_idx": 16612, "end_char_idx": 16681, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ff83caf1-b91c-4092-a387-039161a5ddbb": {"__data__": {"id_": "ff83caf1-b91c-4092-a387-039161a5ddbb", "embedding": null, "metadata": {"window": "In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. ", "original_text": "The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fe71d38c-30df-4a7c-a675-873b589f5b1a", "node_type": "1", "metadata": {"window": "The mean average precision (mAP) is the\nstandard metric for object detectors. In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. 
In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n", "original_text": "For each IoU\nthreshold, one PR curve and one AP measure is obtained. "}, "hash": "9bce028917e6c1fe2096e5b83db8f7c0eb976457adf56d12b9b0d745ad6774db", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "69a7e9fd-2701-4ce0-a07c-5449976d4818", "node_type": "1", "metadata": {"window": "The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. ", "original_text": "Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. "}, "hash": "541b1eeaa0195d4022ca618524d3ba60e36542012eb9262a8553396fe9b2dd0a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. ", "mimetype": "text/plain", "start_char_idx": 16681, "end_char_idx": 16800, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "69a7e9fd-2701-4ce0-a07c-5449976d4818": {"__data__": {"id_": "69a7e9fd-2701-4ce0-a07c-5449976d4818", "embedding": null, "metadata": {"window": "The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). 
For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. ", "original_text": "Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ff83caf1-b91c-4092-a387-039161a5ddbb", "node_type": "1", "metadata": {"window": "In short, it is obtained by computing the average preci-\nsion (AP) across of set of IoU (Intersection over Union) thresholds for each class and taking the\nmean of all APs. The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. ", "original_text": "The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. "}, "hash": "6839c1cad36e4671a344ccbe6ae8ac64b8743a61f46ab0aa757cfeb61bb2363b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "43b9e5bb-0376-42c9-a8a6-07c2e8a15b9c", "node_type": "1", "metadata": {"window": "In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. 
The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. ", "original_text": "During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n"}, "hash": "574fcec98a86e8813f013fb360f8738b293ea3faf185e1d4fee84192c7889f42", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. ", "mimetype": "text/plain", "start_char_idx": 16800, "end_char_idx": 16891, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "43b9e5bb-0376-42c9-a8a6-07c2e8a15b9c": {"__data__": {"id_": "43b9e5bb-0376-42c9-a8a6-07c2e8a15b9c", "embedding": null, "metadata": {"window": "In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. 
", "original_text": "During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "69a7e9fd-2701-4ce0-a07c-5449976d4818", "node_type": "1", "metadata": {"window": "The AP is related to the area under the precision-recall (PR) curve, although\nit isn\u2019t exactly the same. In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. ", "original_text": "Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. "}, "hash": "541b1eeaa0195d4022ca618524d3ba60e36542012eb9262a8553396fe9b2dd0a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b34528f6-bfda-4d05-905e-4637888d1f3f", "node_type": "1", "metadata": {"window": "For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. 
", "original_text": "Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n"}, "hash": "18fe14a1e68f16335046152f91a080ca592b81cedfec0219994b78c93fec4884", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n", "mimetype": "text/plain", "start_char_idx": 16891, "end_char_idx": 16986, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b34528f6-bfda-4d05-905e-4637888d1f3f": {"__data__": {"id_": "b34528f6-bfda-4d05-905e-4637888d1f3f", "embedding": null, "metadata": {"window": "For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. ", "original_text": "Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "43b9e5bb-0376-42c9-a8a6-07c2e8a15b9c", "node_type": "1", "metadata": {"window": "In object detection, the PR curve depends on the IoU threshold, as it\ndetermines how strict the requirement is for a ground truth and a predicted bounding box to\nmatch (hence, it sets the boundary between true positives, false positives, etc.). For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. 
During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. ", "original_text": "During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n"}, "hash": "574fcec98a86e8813f013fb360f8738b293ea3faf185e1d4fee84192c7889f42", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "666ae9cf-7c37-45c4-aa63-8ed6fcedde2a", "node_type": "1", "metadata": {"window": "The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n", "original_text": "Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. "}, "hash": "7b2fde52824114fa6aa884c583e9636a0c7ee8dcdd4002533aba168697ae7c65", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n", "mimetype": "text/plain", "start_char_idx": 16986, "end_char_idx": 17184, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "666ae9cf-7c37-45c4-aa63-8ed6fcedde2a": {"__data__": {"id_": "666ae9cf-7c37-45c4-aa63-8ed6fcedde2a", "embedding": null, "metadata": {"window": "The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. 
During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n", "original_text": "Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b34528f6-bfda-4d05-905e-4637888d1f3f", "node_type": "1", "metadata": {"window": "For each IoU\nthreshold, one PR curve and one AP measure is obtained. The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. ", "original_text": "Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n"}, "hash": "18fe14a1e68f16335046152f91a080ca592b81cedfec0219994b78c93fec4884", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e5eb25d7-3e3f-4e7d-966c-2aff19f0d129", "node_type": "1", "metadata": {"window": "Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. 
During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. ", "original_text": "Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. "}, "hash": "2035091b1470acb295f5f3590af06f271a21923eaaccb03fe6d5ad0d8e5b1121", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. ", "mimetype": "text/plain", "start_char_idx": 17184, "end_char_idx": 17351, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e5eb25d7-3e3f-4e7d-966c-2aff19f0d129": {"__data__": {"id_": "e5eb25d7-3e3f-4e7d-966c-2aff19f0d129", "embedding": null, "metadata": {"window": "Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. ", "original_text": "Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "666ae9cf-7c37-45c4-aa63-8ed6fcedde2a", "node_type": "1", "metadata": {"window": "The mAP has the big advantage\nof being independent of the IoU threshold, which is important to evaluate models fairly. Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n", "original_text": "Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. "}, "hash": "7b2fde52824114fa6aa884c583e9636a0c7ee8dcdd4002533aba168697ae7c65", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bc26c442-0638-4ecb-8ede-0d9fad749592", "node_type": "1", "metadata": {"window": "During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. ", "original_text": "It is therefore critical for the model\u2019s performance. "}, "hash": "eae8e29b6beea6bea846ea7bcf7b46e64bf5204a23277bfa2185a2f7cc81e9b6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. 
", "mimetype": "text/plain", "start_char_idx": 17351, "end_char_idx": 17516, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bc26c442-0638-4ecb-8ede-0d9fad749592": {"__data__": {"id_": "bc26c442-0638-4ecb-8ede-0d9fad749592", "embedding": null, "metadata": {"window": "During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. ", "original_text": "It is therefore critical for the model\u2019s performance. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e5eb25d7-3e3f-4e7d-966c-2aff19f0d129", "node_type": "1", "metadata": {"window": "Its\nimplementation in the torchmetrics library made it easy to integrate in the framework. During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. ", "original_text": "Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. 
"}, "hash": "2035091b1470acb295f5f3590af06f271a21923eaaccb03fe6d5ad0d8e5b1121", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dfdb0fe1-039f-46db-86b9-0a10d5744edd", "node_type": "1", "metadata": {"window": "Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. ", "original_text": "A common\nway to choose the score threshold is from the precision recall curve. "}, "hash": "a21b0bcae443a61a4cdabb18600c6c1a57bc310840e060b736481f46af850323", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "It is therefore critical for the model\u2019s performance. ", "mimetype": "text/plain", "start_char_idx": 17516, "end_char_idx": 17570, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dfdb0fe1-039f-46db-86b9-0a10d5744edd": {"__data__": {"id_": "dfdb0fe1-039f-46db-86b9-0a10d5744edd", "embedding": null, "metadata": {"window": "Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. ", "original_text": "A common\nway to choose the score threshold is from the precision recall curve. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bc26c442-0638-4ecb-8ede-0d9fad749592", "node_type": "1", "metadata": {"window": "During\ntrainings, the mAP is computed on the evaluation set at each epoch and uploaded to W&B.\n Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. ", "original_text": "It is therefore critical for the model\u2019s performance. "}, "hash": "eae8e29b6beea6bea846ea7bcf7b46e64bf5204a23277bfa2185a2f7cc81e9b6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a50c3e99-7093-45d1-86cd-a513f3206a01", "node_type": "1", "metadata": {"window": "Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. ", "original_text": "To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n"}, "hash": "55b11b12c7e5598a2c369b4ecffb57ee23ab6e1a50fc6f032c4d0b4ddf0926fe", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A common\nway to choose the score threshold is from the precision recall curve. 
", "mimetype": "text/plain", "start_char_idx": 17570, "end_char_idx": 17649, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a50c3e99-7093-45d1-86cd-a513f3206a01": {"__data__": {"id_": "a50c3e99-7093-45d1-86cd-a513f3206a01", "embedding": null, "metadata": {"window": "Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. ", "original_text": "To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dfdb0fe1-039f-46db-86b9-0a10d5744edd", "node_type": "1", "metadata": {"window": "Provided that the evaluation set is fixed, this offers a quick way to compare training on the\nW&B - much better than using the model\u2019s loss, which can change across models and is not\ninterpretable.\n Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. ", "original_text": "A common\nway to choose the score threshold is from the precision recall curve. "}, "hash": "a21b0bcae443a61a4cdabb18600c6c1a57bc310840e060b736481f46af850323", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f7dd4111-a43b-4e70-93a1-45a75074060b", "node_type": "1", "metadata": {"window": "Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. 
It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. ", "original_text": "The GUI is presented in Fig.2.2. "}, "hash": "7c2d38ed2bb6e3f3a733bfe4a07b598a6daa8a1e8cc10bfd480f8ba53a5c6ec8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n", "mimetype": "text/plain", "start_char_idx": 17649, "end_char_idx": 17769, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f7dd4111-a43b-4e70-93a1-45a75074060b": {"__data__": {"id_": "f7dd4111-a43b-4e70-93a1-45a75074060b", "embedding": null, "metadata": {"window": "Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. ", "original_text": "The GUI is presented in Fig.2.2. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a50c3e99-7093-45d1-86cd-a513f3206a01", "node_type": "1", "metadata": {"window": "Threshold analysis\nWhile being a useful metric to compare models, the mAP doesn\u2019t help in setting the optimal\nthreshold of the chosen model, which is a critical task. Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. 
To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. ", "original_text": "To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n"}, "hash": "55b11b12c7e5598a2c369b4ecffb57ee23ab6e1a50fc6f032c4d0b4ddf0926fe", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b943a226-2bd6-4d14-a178-5e1cc7732450", "node_type": "1", "metadata": {"window": "It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n", "original_text": "It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. "}, "hash": "4332dca19f1fac65e488083e2e9e24245138704a3b0eacaedcf9fcaacafe83a6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The GUI is presented in Fig.2.2. ", "mimetype": "text/plain", "start_char_idx": 17769, "end_char_idx": 17802, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b943a226-2bd6-4d14-a178-5e1cc7732450": {"__data__": {"id_": "b943a226-2bd6-4d14-a178-5e1cc7732450", "embedding": null, "metadata": {"window": "It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. 
Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n", "original_text": "It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f7dd4111-a43b-4e70-93a1-45a75074060b", "node_type": "1", "metadata": {"window": "Not to be confused with the IoU thresh-\nold mentioned above, the score threshold determines whether a predicted bounding box is kept\nin the final prediction or not. It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. ", "original_text": "The GUI is presented in Fig.2.2. "}, "hash": "7c2d38ed2bb6e3f3a733bfe4a07b598a6daa8a1e8cc10bfd480f8ba53a5c6ec8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f28eb04f-c87b-4361-a74a-8b1d453840d2", "node_type": "1", "metadata": {"window": "A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. ", "original_text": "The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. "}, "hash": "685b77f8cbb4ec20a117b9f91d274e5921207390d69a7437f72ac363d200cea0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. 
", "mimetype": "text/plain", "start_char_idx": 17802, "end_char_idx": 17966, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f28eb04f-c87b-4361-a74a-8b1d453840d2": {"__data__": {"id_": "f28eb04f-c87b-4361-a74a-8b1d453840d2", "embedding": null, "metadata": {"window": "A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. ", "original_text": "The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b943a226-2bd6-4d14-a178-5e1cc7732450", "node_type": "1", "metadata": {"window": "It is therefore critical for the model\u2019s performance. A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n", "original_text": "It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. "}, "hash": "4332dca19f1fac65e488083e2e9e24245138704a3b0eacaedcf9fcaacafe83a6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "38bc428d-6a40-480a-af6e-c4a4491d85c9", "node_type": "1", "metadata": {"window": "To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. 
The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. ", "original_text": "the proportion of correct insect detections among\nall detections) and recall (i.e. "}, "hash": "c879caf6e44e4243215ea91ff86de07d91af4e243bd28712ec26d26759a4f405", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. ", "mimetype": "text/plain", "start_char_idx": 17966, "end_char_idx": 18090, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "38bc428d-6a40-480a-af6e-c4a4491d85c9": {"__data__": {"id_": "38bc428d-6a40-480a-af6e-c4a4491d85c9", "embedding": null, "metadata": {"window": "To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. ", "original_text": "the proportion of correct insect detections among\nall detections) and recall (i.e. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f28eb04f-c87b-4361-a74a-8b1d453840d2", "node_type": "1", "metadata": {"window": "A common\nway to choose the score threshold is from the precision recall curve. To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. 
Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. ", "original_text": "The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. "}, "hash": "685b77f8cbb4ec20a117b9f91d274e5921207390d69a7437f72ac363d200cea0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ced0748f-8840-4758-808f-9c7c25da7617", "node_type": "1", "metadata": {"window": "The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. ", "original_text": "the proportion of correct insect detections among all insects on\nthe images) to expect from the model. "}, "hash": "7291bcba8de4c28da7d30d4265398b8c9874f9e50c8512ce8a713bb052686afe", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "the proportion of correct insect detections among\nall detections) and recall (i.e. ", "mimetype": "text/plain", "start_char_idx": 18090, "end_char_idx": 18173, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ced0748f-8840-4758-808f-9c7c25da7617": {"__data__": {"id_": "ced0748f-8840-4758-808f-9c7c25da7617", "embedding": null, "metadata": {"window": "The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. ", "original_text": "the proportion of correct insect detections among all insects on\nthe images) to expect from the model. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "38bc428d-6a40-480a-af6e-c4a4491d85c9", "node_type": "1", "metadata": {"window": "To make this operation pos-\nsible, the computation of the PR curve was implemented from scratch, and a GUI was created.\n The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. ", "original_text": "the proportion of correct insect detections among\nall detections) and recall (i.e. "}, "hash": "c879caf6e44e4243215ea91ff86de07d91af4e243bd28712ec26d26759a4f405", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6c619a43-f240-49a7-91fc-c5bc46fb2fa9", "node_type": "1", "metadata": {"window": "It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. ", "original_text": "Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n"}, "hash": "ed7da96a669d1ba227c8b94ff72f4af123a80cb969b75ac9b34ad23ff7e6884b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "the proportion of correct insect detections among all insects on\nthe images) to expect from the model. 
", "mimetype": "text/plain", "start_char_idx": 18173, "end_char_idx": 18276, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6c619a43-f240-49a7-91fc-c5bc46fb2fa9": {"__data__": {"id_": "6c619a43-f240-49a7-91fc-c5bc46fb2fa9", "embedding": null, "metadata": {"window": "It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. ", "original_text": "Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ced0748f-8840-4758-808f-9c7c25da7617", "node_type": "1", "metadata": {"window": "The GUI is presented in Fig.2.2. It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. ", "original_text": "the proportion of correct insect detections among all insects on\nthe images) to expect from the model. 
"}, "hash": "7291bcba8de4c28da7d30d4265398b8c9874f9e50c8512ce8a713bb052686afe", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c8798be2-44e1-4bc0-bc4c-027e842b425a", "node_type": "1", "metadata": {"window": "The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. ", "original_text": "8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. "}, "hash": "43f41645e26166862c971ce074f0863299f3be98212f092853c1123f0180e9bd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n", "mimetype": "text/plain", "start_char_idx": 18276, "end_char_idx": 18404, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c8798be2-44e1-4bc0-bc4c-027e842b425a": {"__data__": {"id_": "c8798be2-44e1-4bc0-bc4c-027e842b425a", "embedding": null, "metadata": {"window": "The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. ", "original_text": "8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6c619a43-f240-49a7-91fc-c5bc46fb2fa9", "node_type": "1", "metadata": {"window": "It takes as inputs the IoU threshold(s) at which to compute\nthe PR curve, and the paths to the json files containing the ground truths and the model pre-\ndictions. The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. ", "original_text": "Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n"}, "hash": "ed7da96a669d1ba227c8b94ff72f4af123a80cb969b75ac9b34ad23ff7e6884b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b4f1628e-9280-4b3f-8ba5-96d2081b87dc", "node_type": "1", "metadata": {"window": "the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. ", "original_text": "T HE TRAINING DATA\n2.2.1. "}, "hash": "37ea05d9b460153b7a6175aeee19f1b114f98eabafd4245a38444d387bb4db5b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. 
", "mimetype": "text/plain", "start_char_idx": 18404, "end_char_idx": 18470, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b4f1628e-9280-4b3f-8ba5-96d2081b87dc": {"__data__": {"id_": "b4f1628e-9280-4b3f-8ba5-96d2081b87dc", "embedding": null, "metadata": {"window": "the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. ", "original_text": "T HE TRAINING DATA\n2.2.1. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c8798be2-44e1-4bc0-bc4c-027e842b425a", "node_type": "1", "metadata": {"window": "The user can set the model threshold and visualize the corresponding point on the\nPR curve, hence know what precision (i.e. the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. ", "original_text": "8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. "}, "hash": "43f41645e26166862c971ce074f0863299f3be98212f092853c1123f0180e9bd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "644a0b92-3b13-4fdb-8fde-e0b6861b6c3f", "node_type": "1", "metadata": {"window": "the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. 
T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n", "original_text": "The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. "}, "hash": "2830b8edc1989a037ee6bdca8e425591167d1d996d633134f3f8437277000fc5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "T HE TRAINING DATA\n2.2.1. ", "mimetype": "text/plain", "start_char_idx": 18470, "end_char_idx": 18496, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "644a0b92-3b13-4fdb-8fde-e0b6861b6c3f": {"__data__": {"id_": "644a0b92-3b13-4fdb-8fde-e0b6861b6c3f", "embedding": null, "metadata": {"window": "the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n", "original_text": "The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b4f1628e-9280-4b3f-8ba5-96d2081b87dc", "node_type": "1", "metadata": {"window": "the proportion of correct insect detections among\nall detections) and recall (i.e. the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. ", "original_text": "T HE TRAINING DATA\n2.2.1. "}, "hash": "37ea05d9b460153b7a6175aeee19f1b114f98eabafd4245a38444d387bb4db5b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ec66bbb0-1388-48e6-b91e-430c75f57caa", "node_type": "1", "metadata": {"window": "Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. ", "original_text": "A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. 
"}, "hash": "4779deb090af1ea16312df4d40055c745815f3e91c9aaa7071c84438ab628549", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. ", "mimetype": "text/plain", "start_char_idx": 18496, "end_char_idx": 18758, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ec66bbb0-1388-48e6-b91e-430c75f57caa": {"__data__": {"id_": "ec66bbb0-1388-48e6-b91e-430c75f57caa", "embedding": null, "metadata": {"window": "Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. ", "original_text": "A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "644a0b92-3b13-4fdb-8fde-e0b6861b6c3f", "node_type": "1", "metadata": {"window": "the proportion of correct insect detections among all insects on\nthe images) to expect from the model. Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. 
In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n", "original_text": "The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. "}, "hash": "2830b8edc1989a037ee6bdca8e425591167d1d996d633134f3f8437277000fc5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "836469b3-50f3-431d-8d1c-a0f6e11e23f5", "node_type": "1", "metadata": {"window": "8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. ", "original_text": "This is a clear example of data-shift. "}, "hash": "9bf428dca99bf4c63ebc87aa3cb6c53adaf62bd9cb94f9128e5fd6f58a305529", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. ", "mimetype": "text/plain", "start_char_idx": 18758, "end_char_idx": 18956, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "836469b3-50f3-431d-8d1c-a0f6e11e23f5": {"__data__": {"id_": "836469b3-50f3-431d-8d1c-a0f6e11e23f5", "embedding": null, "metadata": {"window": "8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. 
In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. ", "original_text": "This is a clear example of data-shift. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ec66bbb0-1388-48e6-b91e-430c75f57caa", "node_type": "1", "metadata": {"window": "Given a testing dataset for a specific deployment, this\ninformation would be very valuable for the ecologists using the system.\n 8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. ", "original_text": "A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. "}, "hash": "4779deb090af1ea16312df4d40055c745815f3e91c9aaa7071c84438ab628549", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1e78c7d0-86cf-40d5-90a8-3d34d076c8f6", "node_type": "1", "metadata": {"window": "T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. 
I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. ", "original_text": "In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. "}, "hash": "90be75a0682edf69be50498eebb4bec5798df0eca94e4e9c82432b802043b4b1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "This is a clear example of data-shift. ", "mimetype": "text/plain", "start_char_idx": 18956, "end_char_idx": 18995, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1e78c7d0-86cf-40d5-90a8-3d34d076c8f6": {"__data__": {"id_": "1e78c7d0-86cf-40d5-90a8-3d34d076c8f6", "embedding": null, "metadata": {"window": "T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. ", "original_text": "In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "836469b3-50f3-431d-8d1c-a0f6e11e23f5", "node_type": "1", "metadata": {"window": "8\n\nFigure 2.2: Precision-recall curve for threshold analysis\n2.2. T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. 
A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. ", "original_text": "This is a clear example of data-shift. "}, "hash": "9bf428dca99bf4c63ebc87aa3cb6c53adaf62bd9cb94f9128e5fd6f58a305529", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1c22b4b7-6bb3-4d7c-a1d9-2a38853a04df", "node_type": "1", "metadata": {"window": "The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. ", "original_text": "I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n"}, "hash": "e7e0f35a8ba51ddd0c4e0d50329766605f3d31ca01a6faf87403d69b63177fd5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. 
", "mimetype": "text/plain", "start_char_idx": 18995, "end_char_idx": 19118, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1c22b4b7-6bb3-4d7c-a1d9-2a38853a04df": {"__data__": {"id_": "1c22b4b7-6bb3-4d7c-a1d9-2a38853a04df", "embedding": null, "metadata": {"window": "The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. ", "original_text": "I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1e78c7d0-86cf-40d5-90a8-3d34d076c8f6", "node_type": "1", "metadata": {"window": "T HE TRAINING DATA\n2.2.1. The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. 
", "original_text": "In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. "}, "hash": "90be75a0682edf69be50498eebb4bec5798df0eca94e4e9c82432b802043b4b1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "06b7671a-1846-4f01-87fc-b0a41e658fb4", "node_type": "1", "metadata": {"window": "A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. ", "original_text": "To test this hypothesis, a SSDlite model was trained. "}, "hash": "f16d50d5c3162d0413729c7abd66d8ff513ccb44ed3e6063c02dd69d085dcc1b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n", "mimetype": "text/plain", "start_char_idx": 19118, "end_char_idx": 19375, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "06b7671a-1846-4f01-87fc-b0a41e658fb4": {"__data__": {"id_": "06b7671a-1846-4f01-87fc-b0a41e658fb4", "embedding": null, "metadata": {"window": "A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. 
", "original_text": "To test this hypothesis, a SSDlite model was trained. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1c22b4b7-6bb3-4d7c-a1d9-2a38853a04df", "node_type": "1", "metadata": {"window": "The need for new training datasets\nAs mentioned before, the Faster R-CNN model was trained on a set of images on which bound-\ning boxes could accurately be inferred with classical computer vision techniques, due to the\nlow density of moths and clean background. A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. ", "original_text": "I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n"}, "hash": "e7e0f35a8ba51ddd0c4e0d50329766605f3d31ca01a6faf87403d69b63177fd5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "99a2f09a-5222-4606-88b0-cafd574b8889", "node_type": "1", "metadata": {"window": "This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. 
As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n", "original_text": "The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. "}, "hash": "5a98bed1877a4a056da7186c3d44d0a8462cb17cd90bda7458a9e1722ff320f2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To test this hypothesis, a SSDlite model was trained. ", "mimetype": "text/plain", "start_char_idx": 19375, "end_char_idx": 19429, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "99a2f09a-5222-4606-88b0-cafd574b8889": {"__data__": {"id_": "99a2f09a-5222-4606-88b0-cafd574b8889", "embedding": null, "metadata": {"window": "This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n", "original_text": "The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "06b7671a-1846-4f01-87fc-b0a41e658fb4", "node_type": "1", "metadata": {"window": "A visual inspection of this dataset revealed that\nit is actually not from any of the current deployments2, and that a different data acquisition\ntechnique was used, leading to way less sharp edges. This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. 
The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. ", "original_text": "To test this hypothesis, a SSDlite model was trained. "}, "hash": "f16d50d5c3162d0413729c7abd66d8ff513ccb44ed3e6063c02dd69d085dcc1b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ba72a6f6-4f8c-4898-a36a-efb97d735117", "node_type": "1", "metadata": {"window": "In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n", "original_text": "A number of images were\nused as a gap between training and validation sets, and thus were unused. "}, "hash": "bbf88c6125b2ce6721f784588506f44ef9fc00017778d9a2fa2dc2b742df1717", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. ", "mimetype": "text/plain", "start_char_idx": 19429, "end_char_idx": 19538, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ba72a6f6-4f8c-4898-a36a-efb97d735117": {"__data__": {"id_": "ba72a6f6-4f8c-4898-a36a-efb97d735117", "embedding": null, "metadata": {"window": "In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. 
This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n", "original_text": "A number of images were\nused as a gap between training and validation sets, and thus were unused. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "99a2f09a-5222-4606-88b0-cafd574b8889", "node_type": "1", "metadata": {"window": "This is a clear example of data-shift. In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n", "original_text": "The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. "}, "hash": "5a98bed1877a4a056da7186c3d44d0a8462cb17cd90bda7458a9e1722ff320f2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8b188f3a-afb7-4cf3-9edc-19eed509bc96", "node_type": "1", "metadata": {"window": "I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. 
As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. ", "original_text": "This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. "}, "hash": "b9b4a88f27f635732933182620a643191dbbc786153fd5974432bad1e92b06cb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A number of images were\nused as a gap between training and validation sets, and thus were unused. ", "mimetype": "text/plain", "start_char_idx": 19538, "end_char_idx": 19636, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8b188f3a-afb7-4cf3-9edc-19eed509bc96": {"__data__": {"id_": "8b188f3a-afb7-4cf3-9edc-19eed509bc96", "embedding": null, "metadata": {"window": "I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. ", "original_text": "This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ba72a6f6-4f8c-4898-a36a-efb97d735117", "node_type": "1", "metadata": {"window": "In\nthe past, quick tests with a SSDlite model \u2014the lightest and faster architecture available on\ntorchvision\u2014, had failed. 
I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n", "original_text": "A number of images were\nused as a gap between training and validation sets, and thus were unused. "}, "hash": "bbf88c6125b2ce6721f784588506f44ef9fc00017778d9a2fa2dc2b742df1717", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bca09590-b1ba-4ec9-a2fa-a5c243d12804", "node_type": "1", "metadata": {"window": "To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). ", "original_text": "The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. "}, "hash": "03c9158c67b5dd1bb32839463028f01bf62f11f28af7d11b73d880649681147d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. ", "mimetype": "text/plain", "start_char_idx": 19636, "end_char_idx": 19767, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bca09590-b1ba-4ec9-a2fa-a5c243d12804": {"__data__": {"id_": "bca09590-b1ba-4ec9-a2fa-a5c243d12804", "embedding": null, "metadata": {"window": "To test this hypothesis, a SSDlite model was trained. 
The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). ", "original_text": "The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8b188f3a-afb7-4cf3-9edc-19eed509bc96", "node_type": "1", "metadata": {"window": "I hypothesized that one of the reasons could have been data-shift:\nwhile the relatively heavy Faster R-CNN with ResNet-50-FPN backbone can handle the shift, a\nsmaller model is expected to have less generalization capacity, and hence be more sensible to it.\n To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. ", "original_text": "This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. "}, "hash": "b9b4a88f27f635732933182620a643191dbbc786153fd5974432bad1e92b06cb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ee44a729-ae02-4e9e-8cdd-aa947c80dc58", "node_type": "1", "metadata": {"window": "The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. 
This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n", "original_text": "As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n"}, "hash": "933e18055b82d29845419e8a2e10bda484d8d034ae4c4575c7165f612fe55398", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. ", "mimetype": "text/plain", "start_char_idx": 19767, "end_char_idx": 19891, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ee44a729-ae02-4e9e-8cdd-aa947c80dc58": {"__data__": {"id_": "ee44a729-ae02-4e9e-8cdd-aa947c80dc58", "embedding": null, "metadata": {"window": "The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). 
In the example, the largest square bounding box\nhas the highest confidence.\n", "original_text": "As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bca09590-b1ba-4ec9-a2fa-a5c243d12804", "node_type": "1", "metadata": {"window": "To test this hypothesis, a SSDlite model was trained. The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). ", "original_text": "The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. "}, "hash": "03c9158c67b5dd1bb32839463028f01bf62f11f28af7d11b73d880649681147d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8cd4a27b-8e2d-4e78-8f6f-4ba7ca98ebeb", "node_type": "1", "metadata": {"window": "A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. 
", "original_text": "2Or at least, not from any of the deployments from which images available to the team at Mila are.\n"}, "hash": "93bf6b53bb414243bb87281567a1360907e39b6f8ca2cf50161e90317a2d369d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n", "mimetype": "text/plain", "start_char_idx": 19891, "end_char_idx": 20145, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8cd4a27b-8e2d-4e78-8f6f-4ba7ca98ebeb": {"__data__": {"id_": "8cd4a27b-8e2d-4e78-8f6f-4ba7ca98ebeb", "embedding": null, "metadata": {"window": "A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. ", "original_text": "2Or at least, not from any of the deployments from which images available to the team at Mila are.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ee44a729-ae02-4e9e-8cdd-aa947c80dc58", "node_type": "1", "metadata": {"window": "The validation set was created by selecting\nimages after a certain time stamp, as opposed to a random split. A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. 
Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n", "original_text": "As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n"}, "hash": "933e18055b82d29845419e8a2e10bda484d8d034ae4c4575c7165f612fe55398", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0b67da66-6933-4e1d-9364-b7f11e18922a", "node_type": "1", "metadata": {"window": "This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. ", "original_text": "9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. "}, "hash": "11ca171ec828fa748a9f5732f382740a3667cf5a94bd84907b7888ff7957b3ee", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "2Or at least, not from any of the deployments from which images available to the team at Mila are.\n", "mimetype": "text/plain", "start_char_idx": 20145, "end_char_idx": 20244, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0b67da66-6933-4e1d-9364-b7f11e18922a": {"__data__": {"id_": "0b67da66-6933-4e1d-9364-b7f11e18922a", "embedding": null, "metadata": {"window": "This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. 
Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. ", "original_text": "9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8cd4a27b-8e2d-4e78-8f6f-4ba7ca98ebeb", "node_type": "1", "metadata": {"window": "A number of images were\nused as a gap between training and validation sets, and thus were unused. This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. ", "original_text": "2Or at least, not from any of the deployments from which images available to the team at Mila are.\n"}, "hash": "93bf6b53bb414243bb87281567a1360907e39b6f8ca2cf50161e90317a2d369d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "27578e8e-c44c-4da5-892d-93679b7939c7", "node_type": "1", "metadata": {"window": "The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. 
", "original_text": "Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). "}, "hash": "e803fd4358f8949860de94415a8b5be323e630d9a75968092971d3a3ecb9d33f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. ", "mimetype": "text/plain", "start_char_idx": 20244, "end_char_idx": 20356, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "27578e8e-c44c-4da5-892d-93679b7939c7": {"__data__": {"id_": "27578e8e-c44c-4da5-892d-93679b7939c7", "embedding": null, "metadata": {"window": "The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. ", "original_text": "Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0b67da66-6933-4e1d-9364-b7f11e18922a", "node_type": "1", "metadata": {"window": "This ensured a\ncertain diversity between validation and training images, as consecutive images can be very\nsimilar to one another. The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. 
Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. ", "original_text": "9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. "}, "hash": "11ca171ec828fa748a9f5732f382740a3667cf5a94bd84907b7888ff7957b3ee", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dfef5d01-f7f9-4978-8a6f-2b91e90e3fd7", "node_type": "1", "metadata": {"window": "As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. ", "original_text": "In the example, the largest square bounding box\nhas the highest confidence.\n"}, "hash": "5897be770633d00c135e5f9a33826bd64cea03cae96c41d0c4a9f3d9d760cd98", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). ", "mimetype": "text/plain", "start_char_idx": 20356, "end_char_idx": 20506, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dfef5d01-f7f9-4978-8a6f-2b91e90e3fd7": {"__data__": {"id_": "dfef5d01-f7f9-4978-8a6f-2b91e90e3fd7", "embedding": null, "metadata": {"window": "As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. 
However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. ", "original_text": "In the example, the largest square bounding box\nhas the highest confidence.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "27578e8e-c44c-4da5-892d-93679b7939c7", "node_type": "1", "metadata": {"window": "The model was tested on the validation set and on new images, and\nthe predictions were inspected with the apposite GUI app. As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. ", "original_text": "Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). "}, "hash": "e803fd4358f8949860de94415a8b5be323e630d9a75968092971d3a3ecb9d33f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "36134c7b-c6ec-425f-862c-8d08d2867463", "node_type": "1", "metadata": {"window": "2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. ", "original_text": "2.2.2. 
"}, "hash": "1122e6ffd891df77c32f4ea1668799c75af19014842634b047fa04934fc1c1e7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In the example, the largest square bounding box\nhas the highest confidence.\n", "mimetype": "text/plain", "start_char_idx": 20506, "end_char_idx": 20582, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "36134c7b-c6ec-425f-862c-8d08d2867463": {"__data__": {"id_": "36134c7b-c6ec-425f-862c-8d08d2867463", "embedding": null, "metadata": {"window": "2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. ", "original_text": "2.2.2. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dfef5d01-f7f9-4978-8a6f-2b91e90e3fd7", "node_type": "1", "metadata": {"window": "As shown in Fig.2.3, a significant\ndrop in performance was observed between validation images and target images, confirming\nthe initial hypothesis, and suggesting that a new training dataset was needed to facilitate the\nadoption of a light-weight model.\n 2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. 
", "original_text": "In the example, the largest square bounding box\nhas the highest confidence.\n"}, "hash": "5897be770633d00c135e5f9a33826bd64cea03cae96c41d0c4a9f3d9d760cd98", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1d2d7877-32f2-40f6-b7aa-3ebab6f2c72f", "node_type": "1", "metadata": {"window": "9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). ", "original_text": "Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. "}, "hash": "7ff7a89de12634db4d3e211e8be8ea4118ac3086e62636b18f025a5643664368", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "2.2.2. ", "mimetype": "text/plain", "start_char_idx": 20582, "end_char_idx": 20589, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1d2d7877-32f2-40f6-b7aa-3ebab6f2c72f": {"__data__": {"id_": "1d2d7877-32f2-40f6-b7aa-3ebab6f2c72f", "embedding": null, "metadata": {"window": "9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). ", "original_text": "Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "36134c7b-c6ec-425f-862c-8d08d2867463", "node_type": "1", "metadata": {"window": "2Or at least, not from any of the deployments from which images available to the team at Mila are.\n 9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. ", "original_text": "2.2.2. "}, "hash": "1122e6ffd891df77c32f4ea1668799c75af19014842634b047fa04934fc1c1e7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "856fbdb6-50d9-43d1-907e-728e8e1c8bad", "node_type": "1", "metadata": {"window": "Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. ", "original_text": "Bounding boxes were derived automatically using the Faster R-CNN model. "}, "hash": "2f9894c6e3f8151eebb937a526885cad2fd0c4a9de5b2101259aee1e4052e911", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. ", "mimetype": "text/plain", "start_char_idx": 20589, "end_char_idx": 20796, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "856fbdb6-50d9-43d1-907e-728e8e1c8bad": {"__data__": {"id_": "856fbdb6-50d9-43d1-907e-728e8e1c8bad", "embedding": null, "metadata": {"window": "Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). 
In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. ", "original_text": "Bounding boxes were derived automatically using the Faster R-CNN model. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1d2d7877-32f2-40f6-b7aa-3ebab6f2c72f", "node_type": "1", "metadata": {"window": "9\n\n(a) Old image\n (b) Recent image\nFigure 2.3: Performance comparison of a SSDlite model trained on old images. Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). ", "original_text": "Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. "}, "hash": "7ff7a89de12634db4d3e211e8be8ea4118ac3086e62636b18f025a5643664368", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c6f1ed54-5a96-4bc7-bdd1-c9f47ab02bc7", "node_type": "1", "metadata": {"window": "In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). 
", "original_text": "However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. "}, "hash": "4bdcb172b557b7e1c0db50b8f22303fd78887ebd0e5b07ab544c5f2f04b5d015", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Bounding boxes were derived automatically using the Faster R-CNN model. ", "mimetype": "text/plain", "start_char_idx": 20796, "end_char_idx": 20868, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c6f1ed54-5a96-4bc7-bdd1-c9f47ab02bc7": {"__data__": {"id_": "c6f1ed54-5a96-4bc7-bdd1-c9f47ab02bc7", "embedding": null, "metadata": {"window": "In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). ", "original_text": "However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "856fbdb6-50d9-43d1-907e-728e8e1c8bad", "node_type": "1", "metadata": {"window": "Reasonable\npredictions are given across a wide spectrum of thresholds on the old images (a), while absurd\npredictions are made on the new images (b). In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. ", "original_text": "Bounding boxes were derived automatically using the Faster R-CNN model. "}, "hash": "2f9894c6e3f8151eebb937a526885cad2fd0c4a9de5b2101259aee1e4052e911", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e8c866d8-522c-4e53-9a29-5081916909e3", "node_type": "1", "metadata": {"window": "2.2.2. 
Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n", "original_text": "false negatives), especially on smaller moths, and double detec-\ntions (i.e. "}, "hash": "ebab23e4da5aa8d525ee53d9727282d36ed272968cee86947b4fc055e41323e3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. ", "mimetype": "text/plain", "start_char_idx": 20868, "end_char_idx": 21002, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e8c866d8-522c-4e53-9a29-5081916909e3": {"__data__": {"id_": "e8c866d8-522c-4e53-9a29-5081916909e3", "embedding": null, "metadata": {"window": "2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n", "original_text": "false negatives), especially on smaller moths, and double detec-\ntions (i.e. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c6f1ed54-5a96-4bc7-bdd1-c9f47ab02bc7", "node_type": "1", "metadata": {"window": "In the example, the largest square bounding box\nhas the highest confidence.\n 2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). ", "original_text": "However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. "}, "hash": "4bdcb172b557b7e1c0db50b8f22303fd78887ebd0e5b07ab544c5f2f04b5d015", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5fcefd32-5a5b-4eca-bd48-b9d30470907a", "node_type": "1", "metadata": {"window": "Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n", "original_text": "bounding boxes that group multiple moths together). "}, "hash": "2f15b63fa5fc5b0b0a03ceffc3db7b2c1225f2ca046aae4be47302be04038947", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "false negatives), especially on smaller moths, and double detec-\ntions (i.e. 
", "mimetype": "text/plain", "start_char_idx": 21002, "end_char_idx": 21079, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5fcefd32-5a5b-4eca-bd48-b9d30470907a": {"__data__": {"id_": "5fcefd32-5a5b-4eca-bd48-b9d30470907a", "embedding": null, "metadata": {"window": "Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n", "original_text": "bounding boxes that group multiple moths together). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e8c866d8-522c-4e53-9a29-5081916909e3", "node_type": "1", "metadata": {"window": "2.2.2. Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n", "original_text": "false negatives), especially on smaller moths, and double detec-\ntions (i.e. "}, "hash": "ebab23e4da5aa8d525ee53d9727282d36ed272968cee86947b4fc055e41323e3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b0e678a5-7403-4096-9648-173728b26a64", "node_type": "1", "metadata": {"window": "Bounding boxes were derived automatically using the Faster R-CNN model. 
However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. ", "original_text": "Additionally, the model would\noccasionally also make multiple predictions on the same large moth. "}, "hash": "5148ffd3113bdfde1e5fa76a7ab27efc41b888ecbab95de39c86d05a9437a52f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "bounding boxes that group multiple moths together). ", "mimetype": "text/plain", "start_char_idx": 21079, "end_char_idx": 21131, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b0e678a5-7403-4096-9648-173728b26a64": {"__data__": {"id_": "b0e678a5-7403-4096-9648-173728b26a64", "embedding": null, "metadata": {"window": "Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. ", "original_text": "Additionally, the model would\noccasionally also make multiple predictions on the same large moth. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5fcefd32-5a5b-4eca-bd48-b9d30470907a", "node_type": "1", "metadata": {"window": "Garbage in, garbage out\nFollowing the findings described in the previous section, a dataset of two thousand images\nfrom four different deployments (Quebec, Vermont, Newfoundland and Panama) was assem-\nbled. Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n", "original_text": "bounding boxes that group multiple moths together). "}, "hash": "2f15b63fa5fc5b0b0a03ceffc3db7b2c1225f2ca046aae4be47302be04038947", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4adc7211-611e-453d-bf69-52cb7daa068c", "node_type": "1", "metadata": {"window": "However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). ", "original_text": "Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). 
"}, "hash": "071394498da139f517cf323ea26213fb8b333a780fa2d48dd7f938b6b5a2296b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Additionally, the model would\noccasionally also make multiple predictions on the same large moth. ", "mimetype": "text/plain", "start_char_idx": 21131, "end_char_idx": 21229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4adc7211-611e-453d-bf69-52cb7daa068c": {"__data__": {"id_": "4adc7211-611e-453d-bf69-52cb7daa068c", "embedding": null, "metadata": {"window": "However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). ", "original_text": "Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b0e678a5-7403-4096-9648-173728b26a64", "node_type": "1", "metadata": {"window": "Bounding boxes were derived automatically using the Faster R-CNN model. However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). 
These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. ", "original_text": "Additionally, the model would\noccasionally also make multiple predictions on the same large moth. "}, "hash": "5148ffd3113bdfde1e5fa76a7ab27efc41b888ecbab95de39c86d05a9437a52f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "83435386-7aed-4c05-96e9-2661e2393c7f", "node_type": "1", "metadata": {"window": "false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n", "original_text": "These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n"}, "hash": "121f54a75236888aff46a4836e4fd99ac13bc8b000329599a70c1eb763330ec1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). ", "mimetype": "text/plain", "start_char_idx": 21229, "end_char_idx": 21430, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "83435386-7aed-4c05-96e9-2661e2393c7f": {"__data__": {"id_": "83435386-7aed-4c05-96e9-2661e2393c7f", "embedding": null, "metadata": {"window": "false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). 
These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n", "original_text": "These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4adc7211-611e-453d-bf69-52cb7daa068c", "node_type": "1", "metadata": {"window": "However,\nan extensive inspection of the predictions revealed that the model was prone to two types of\nerrors: missed detections (i.e. false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). ", "original_text": "Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). "}, "hash": "071394498da139f517cf323ea26213fb8b333a780fa2d48dd7f938b6b5a2296b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "103ceb3c-99ab-4c6e-a42e-352c80efb7a3", "node_type": "1", "metadata": {"window": "bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). 
These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. ", "original_text": "Figure 2.4: Common mistakes from the old Faster R-CNN model.\n"}, "hash": "2af08a1466576ae2936e03255c1185055f35d9e1512be55d1f3c309aa61ff744", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n", "mimetype": "text/plain", "start_char_idx": 21430, "end_char_idx": 21656, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "103ceb3c-99ab-4c6e-a42e-352c80efb7a3": {"__data__": {"id_": "103ceb3c-99ab-4c6e-a42e-352c80efb7a3", "embedding": null, "metadata": {"window": "bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. ", "original_text": "Figure 2.4: Common mistakes from the old Faster R-CNN model.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "83435386-7aed-4c05-96e9-2661e2393c7f", "node_type": "1", "metadata": {"window": "false negatives), especially on smaller moths, and double detec-\ntions (i.e. bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). 
These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n", "original_text": "These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n"}, "hash": "121f54a75236888aff46a4836e4fd99ac13bc8b000329599a70c1eb763330ec1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fc8accd4-c848-4489-b58c-6a07aad18472", "node_type": "1", "metadata": {"window": "Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. ", "original_text": "10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. "}, "hash": "a87e7b3c8c4116031082be57dcbf387edc5630223893ed36d6356a33c0ee908a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Figure 2.4: Common mistakes from the old Faster R-CNN model.\n", "mimetype": "text/plain", "start_char_idx": 21656, "end_char_idx": 21717, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fc8accd4-c848-4489-b58c-6a07aad18472": {"__data__": {"id_": "fc8accd4-c848-4489-b58c-6a07aad18472", "embedding": null, "metadata": {"window": "Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). 
These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. ", "original_text": "10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "103ceb3c-99ab-4c6e-a42e-352c80efb7a3", "node_type": "1", "metadata": {"window": "bounding boxes that group multiple moths together). Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. ", "original_text": "Figure 2.4: Common mistakes from the old Faster R-CNN model.\n"}, "hash": "2af08a1466576ae2936e03255c1185055f35d9e1512be55d1f3c309aa61ff744", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "aa4e8f0a-adf6-444e-ad33-ff92beebdaa3", "node_type": "1", "metadata": {"window": "Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). 
These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. ", "original_text": "Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). "}, "hash": "0dfba0adb4245778a4b86cc17e0d369881a0629b161a963cdd39208e46e6a89f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. ", "mimetype": "text/plain", "start_char_idx": 21717, "end_char_idx": 21865, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "aa4e8f0a-adf6-444e-ad33-ff92beebdaa3": {"__data__": {"id_": "aa4e8f0a-adf6-444e-ad33-ff92beebdaa3", "embedding": null, "metadata": {"window": "Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. ", "original_text": "Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fc8accd4-c848-4489-b58c-6a07aad18472", "node_type": "1", "metadata": {"window": "Additionally, the model would\noccasionally also make multiple predictions on the same large moth. Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. ", "original_text": "10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. "}, "hash": "a87e7b3c8c4116031082be57dcbf387edc5630223893ed36d6356a33c0ee908a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a4ee8a04-86a5-4017-9525-56e63de561d5", "node_type": "1", "metadata": {"window": "These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. 
", "original_text": "Hence, the efforts to create a new training dataset were not over.\n"}, "hash": "d891242be403b2f2d7e0a9ae3e4b50b699e42a7aff8604ce7830bed04696ddd9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). ", "mimetype": "text/plain", "start_char_idx": 21865, "end_char_idx": 22016, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a4ee8a04-86a5-4017-9525-56e63de561d5": {"__data__": {"id_": "a4ee8a04-86a5-4017-9525-56e63de561d5", "embedding": null, "metadata": {"window": "These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. ", "original_text": "Hence, the efforts to create a new training dataset were not over.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "aa4e8f0a-adf6-444e-ad33-ff92beebdaa3", "node_type": "1", "metadata": {"window": "Finally, the bounding\nboxes were often too loose around the insects, which makes them not ideal to train on (object\ndetection annotation best practices include drawing perfectly tight bounding boxes). These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. 
Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. ", "original_text": "Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). "}, "hash": "0dfba0adb4245778a4b86cc17e0d369881a0629b161a963cdd39208e46e6a89f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a69a73e9-37d5-4bdb-90aa-27e37626b018", "node_type": "1", "metadata": {"window": "Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. ", "original_text": "2.2.3. "}, "hash": "80112f44c6fbbe12b4e887b6e9b0322a28bc2dfc93e328b0a7b29e2506f3bfe4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Hence, the efforts to create a new training dataset were not over.\n", "mimetype": "text/plain", "start_char_idx": 22016, "end_char_idx": 22083, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a69a73e9-37d5-4bdb-90aa-27e37626b018": {"__data__": {"id_": "a69a73e9-37d5-4bdb-90aa-27e37626b018", "embedding": null, "metadata": {"window": "Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. ", "original_text": "2.2.3. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a4ee8a04-86a5-4017-9525-56e63de561d5", "node_type": "1", "metadata": {"window": "These\nerrors, some of which are displayed in Fig.2.4, reflect the deficiencies of the model\u2019s training\ndata, where small moths are often not annotated, moths are seldom close to one another and\nbounding boxes are often loose.\n Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. ", "original_text": "Hence, the efforts to create a new training dataset were not over.\n"}, "hash": "d891242be403b2f2d7e0a9ae3e4b50b699e42a7aff8604ce7830bed04696ddd9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8aae115f-7f07-402f-a31a-6c7d5a2a82e5", "node_type": "1", "metadata": {"window": "10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n", "original_text": "Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. "}, "hash": "cc3a59bc3c7f26dea3793fa929765ffbbc79744f305ed423a1de614136c6bc5d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "2.2.3. 
", "mimetype": "text/plain", "start_char_idx": 22083, "end_char_idx": 22090, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8aae115f-7f07-402f-a31a-6c7d5a2a82e5": {"__data__": {"id_": "8aae115f-7f07-402f-a31a-6c7d5a2a82e5", "embedding": null, "metadata": {"window": "10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n", "original_text": "Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a69a73e9-37d5-4bdb-90aa-27e37626b018", "node_type": "1", "metadata": {"window": "Figure 2.4: Common mistakes from the old Faster R-CNN model.\n 10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. ", "original_text": "2.2.3. "}, "hash": "80112f44c6fbbe12b4e887b6e9b0322a28bc2dfc93e328b0a7b29e2506f3bfe4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6f2836ce-f722-49a5-880a-2bd978f5824f", "node_type": "1", "metadata": {"window": "Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). 
Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. ", "original_text": "An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. "}, "hash": "d80f541dc6287ddf6df516f122c41e6204fe4d6d08524575863927e2eeaad002", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. ", "mimetype": "text/plain", "start_char_idx": 22090, "end_char_idx": 22367, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6f2836ce-f722-49a5-880a-2bd978f5824f": {"__data__": {"id_": "6f2836ce-f722-49a5-880a-2bd978f5824f", "embedding": null, "metadata": {"window": "Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. ", "original_text": "An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8aae115f-7f07-402f-a31a-6c7d5a2a82e5", "node_type": "1", "metadata": {"window": "10\n\nThe goal was no longer to make the object detector faster while maintaining the same level\nof accuracy: the accuracy needed to improve as well. Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n", "original_text": "Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. "}, "hash": "cc3a59bc3c7f26dea3793fa929765ffbbc79744f305ed423a1de614136c6bc5d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7a39664b-9d72-4b21-b589-3145f8d4321e", "node_type": "1", "metadata": {"window": "Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. ", "original_text": "its performance would degrade on natural images. "}, "hash": "7a82c489d51db6c49ea1d98dcdc5af6215391b9741c0f0d9a78fd91015df7c36", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. 
", "mimetype": "text/plain", "start_char_idx": 22367, "end_char_idx": 22500, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7a39664b-9d72-4b21-b589-3145f8d4321e": {"__data__": {"id_": "7a39664b-9d72-4b21-b589-3145f8d4321e", "embedding": null, "metadata": {"window": "Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. ", "original_text": "its performance would degrade on natural images. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6f2836ce-f722-49a5-880a-2bd978f5824f", "node_type": "1", "metadata": {"window": "Any model trained on the dataset\nwith Faster R-CNN\u2019s bounding boxes would replicate the same mistakes (as the saying goes,\n\"Garbage in, garbage out\"). Hence, the efforts to create a new training dataset were not over.\n 2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. ", "original_text": "An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. "}, "hash": "d80f541dc6287ddf6df516f122c41e6204fe4d6d08524575863927e2eeaad002", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b1ee681e-2b56-47ce-ab40-dfb695afbd7c", "node_type": "1", "metadata": {"window": "2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. 
An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. ", "original_text": "However, given the potential of\nthe idea, the gamble was taken. "}, "hash": "ae355aa187806e451486d1a64c929e11929b15bc33c715f161217f116591875d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "its performance would degrade on natural images. ", "mimetype": "text/plain", "start_char_idx": 22500, "end_char_idx": 22549, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b1ee681e-2b56-47ce-ab40-dfb695afbd7c": {"__data__": {"id_": "b1ee681e-2b56-47ce-ab40-dfb695afbd7c", "embedding": null, "metadata": {"window": "2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. ", "original_text": "However, given the potential of\nthe idea, the gamble was taken. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7a39664b-9d72-4b21-b589-3145f8d4321e", "node_type": "1", "metadata": {"window": "Hence, the efforts to create a new training dataset were not over.\n 2.2.3. 
Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. ", "original_text": "its performance would degrade on natural images. "}, "hash": "7a82c489d51db6c49ea1d98dcdc5af6215391b9741c0f0d9a78fd91015df7c36", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9905f06a-f993-48a2-ad7f-9756ab1ebcf2", "node_type": "1", "metadata": {"window": "Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. ", "original_text": "The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n"}, "hash": "56b1f127b8d02797178b36811dce89a4c53e6ba0f0523f465e734cea7bd620b6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "However, given the potential of\nthe idea, the gamble was taken. 
", "mimetype": "text/plain", "start_char_idx": 22549, "end_char_idx": 22613, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9905f06a-f993-48a2-ad7f-9756ab1ebcf2": {"__data__": {"id_": "9905f06a-f993-48a2-ad7f-9756ab1ebcf2", "embedding": null, "metadata": {"window": "Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. ", "original_text": "The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b1ee681e-2b56-47ce-ab40-dfb695afbd7c", "node_type": "1", "metadata": {"window": "2.2.3. Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. 
", "original_text": "However, given the potential of\nthe idea, the gamble was taken. "}, "hash": "ae355aa187806e451486d1a64c929e11929b15bc33c715f161217f116591875d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "55488926-b0b1-4a6c-90bf-291b7e7dd687", "node_type": "1", "metadata": {"window": "An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. ", "original_text": "Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. "}, "hash": "26cb89309ddca69c657892c2466a1ec9edc2b7820de2d92d82da5dfea43cfcc4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n", "mimetype": "text/plain", "start_char_idx": 22613, "end_char_idx": 22749, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "55488926-b0b1-4a6c-90bf-291b7e7dd687": {"__data__": {"id_": "55488926-b0b1-4a6c-90bf-291b7e7dd687", "embedding": null, "metadata": {"window": "An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. 
For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. ", "original_text": "Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9905f06a-f993-48a2-ad7f-9756ab1ebcf2", "node_type": "1", "metadata": {"window": "Training data synthesis\nOne bold idea to create new training data was suggested by a teammate: use Meta\u2019s recently\nreleased Segment Anything Model (SAM, [24]) to crop insects, and paste the crops on empty\nbackground images; in other worlds, to synthesize the training dataset. An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. ", "original_text": "The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n"}, "hash": "56b1f127b8d02797178b36811dce89a4c53e6ba0f0523f465e734cea7bd620b6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e88e1403-b9fd-4768-897f-fba304b8c821", "node_type": "1", "metadata": {"window": "its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. 
Hence, the parameters were tweaked\naccordingly.\n", "original_text": "The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. "}, "hash": "3cd361950e8610e7c8d5d711a1f3905656d6662c72f864ffe00378c2a03413f4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. ", "mimetype": "text/plain", "start_char_idx": 22749, "end_char_idx": 22879, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e88e1403-b9fd-4768-897f-fba304b8c821": {"__data__": {"id_": "e88e1403-b9fd-4768-897f-fba304b8c821", "embedding": null, "metadata": {"window": "its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n", "original_text": "The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "55488926-b0b1-4a6c-90bf-291b7e7dd687", "node_type": "1", "metadata": {"window": "An immediate concern\nwas that a model trained on such a dataset would learn to detect pasted objects instead of actual\ninsects, i.e. its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. 
Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. ", "original_text": "Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. "}, "hash": "26cb89309ddca69c657892c2466a1ec9edc2b7820de2d92d82da5dfea43cfcc4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "df2e7508-c250-4143-a6f0-b956977316b5", "node_type": "1", "metadata": {"window": "However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). ", "original_text": "In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. "}, "hash": "743d83679773d334885fe6db87cb16ec118caf919ef815153d45bc167b1d655c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. ", "mimetype": "text/plain", "start_char_idx": 22879, "end_char_idx": 22993, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "df2e7508-c250-4143-a6f0-b956977316b5": {"__data__": {"id_": "df2e7508-c250-4143-a6f0-b956977316b5", "embedding": null, "metadata": {"window": "However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. 
Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). ", "original_text": "In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e88e1403-b9fd-4768-897f-fba304b8c821", "node_type": "1", "metadata": {"window": "its performance would degrade on natural images. However, given the potential of\nthe idea, the gamble was taken. The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n", "original_text": "The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. "}, "hash": "3cd361950e8610e7c8d5d711a1f3905656d6662c72f864ffe00378c2a03413f4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f26eca50-4efd-42cd-be49-1b4055ad1b92", "node_type": "1", "metadata": {"window": "The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. 
Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. ", "original_text": "Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. "}, "hash": "36ca1c9c66ada5596823e899ca3241fa81f600fdabfc637b69669fc83259a4a2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. ", "mimetype": "text/plain", "start_char_idx": 22993, "end_char_idx": 23241, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f26eca50-4efd-42cd-be49-1b4055ad1b92": {"__data__": {"id_": "f26eca50-4efd-42cd-be49-1b4055ad1b92", "embedding": null, "metadata": {"window": "The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. ", "original_text": "Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "df2e7508-c250-4143-a6f0-b956977316b5", "node_type": "1", "metadata": {"window": "However, given the potential of\nthe idea, the gamble was taken. 
The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). ", "original_text": "In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. "}, "hash": "743d83679773d334885fe6db87cb16ec118caf919ef815153d45bc167b1d655c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "eab23254-e90b-4b4c-b8e1-edcca3a1ca8a", "node_type": "1", "metadata": {"window": "Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. ", "original_text": "For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. "}, "hash": "310190ddd3e7e23819ba4e3fff2080197abf4b1c1a58f6c217617ac2eed427a0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. 
", "mimetype": "text/plain", "start_char_idx": 23241, "end_char_idx": 23434, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "eab23254-e90b-4b4c-b8e1-edcca3a1ca8a": {"__data__": {"id_": "eab23254-e90b-4b4c-b8e1-edcca3a1ca8a", "embedding": null, "metadata": {"window": "Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. ", "original_text": "For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f26eca50-4efd-42cd-be49-1b4055ad1b92", "node_type": "1", "metadata": {"window": "The pipeline consists of three steps: (i) run the inferences with\nSAM, (ii) manually filter the crops, (iii) and synthesize new images.\n Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. ", "original_text": "Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. 
"}, "hash": "36ca1c9c66ada5596823e899ca3241fa81f600fdabfc637b69669fc83259a4a2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "071c7c1a-0cea-411c-8400-38dac6ef5213", "node_type": "1", "metadata": {"window": "The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. ", "original_text": "Hence, the parameters were tweaked\naccordingly.\n"}, "hash": "1cd1942f2648c7affcf6f1dac8b023ad541f6fb508cc82183de72a8323940875", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. ", "mimetype": "text/plain", "start_char_idx": 23434, "end_char_idx": 23566, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "071c7c1a-0cea-411c-8400-38dac6ef5213": {"__data__": {"id_": "071c7c1a-0cea-411c-8400-38dac6ef5213", "embedding": null, "metadata": {"window": "The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. 
", "original_text": "Hence, the parameters were tweaked\naccordingly.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "eab23254-e90b-4b4c-b8e1-edcca3a1ca8a", "node_type": "1", "metadata": {"window": "Inferences with SAM\nA python command-line script was developed to run the SAM model with custom parameters\non the desired images. The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. ", "original_text": "For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. "}, "hash": "310190ddd3e7e23819ba4e3fff2080197abf4b1c1a58f6c217617ac2eed427a0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "15187e20-014d-4d6e-b797-a6d6824ec412", "node_type": "1", "metadata": {"window": "In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. ", "original_text": "A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). 
"}, "hash": "5734622ba9a6f5906d64166d2119fdde797484ed66040ea75dfe55faa6745470", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Hence, the parameters were tweaked\naccordingly.\n", "mimetype": "text/plain", "start_char_idx": 23566, "end_char_idx": 23614, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "15187e20-014d-4d6e-b797-a6d6824ec412": {"__data__": {"id_": "15187e20-014d-4d6e-b797-a6d6824ec412", "embedding": null, "metadata": {"window": "In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. ", "original_text": "A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "071c7c1a-0cea-411c-8400-38dac6ef5213", "node_type": "1", "metadata": {"window": "The script can be used to generate bounding boxes, which are saved\nin a json file, and crops of detected objects. In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. 
", "original_text": "Hence, the parameters were tweaked\naccordingly.\n"}, "hash": "1cd1942f2648c7affcf6f1dac8b023ad541f6fb508cc82183de72a8323940875", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d0f9b502-fc8d-4f2b-a99b-eb674e5affcf", "node_type": "1", "metadata": {"window": "Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n", "original_text": "The IDs 3 of the selected images were\nstored for reference. "}, "hash": "efba16c950483fb52124336922848c91657d709614cf56fdf6c794348d248997", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). ", "mimetype": "text/plain", "start_char_idx": 23614, "end_char_idx": 23755, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d0f9b502-fc8d-4f2b-a99b-eb674e5affcf": {"__data__": {"id_": "d0f9b502-fc8d-4f2b-a99b-eb674e5affcf", "embedding": null, "metadata": {"window": "Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n", "original_text": "The IDs 3 of the selected images were\nstored for reference. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "15187e20-014d-4d6e-b797-a6d6824ec412", "node_type": "1", "metadata": {"window": "In the first case, SAM is used as an object detec-\ntor; unfortunately, while the predicted bounding boxes are perfectly tight, there are too many\nmistakes to consider SAM as a drop-in replacement of Faster R-CNN for the annotation of new\ndatasets. Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. ", "original_text": "A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). "}, "hash": "5734622ba9a6f5906d64166d2119fdde797484ed66040ea75dfe55faa6745470", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9138c195-ffd6-471c-9b0f-35c6f2bda660", "node_type": "1", "metadata": {"window": "For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. ", "original_text": "The image processing produced almost 4k crops. "}, "hash": "41fdc068ddf776f9cf645295569cf1a26e8e2061218e5e12f7a1ed10aac50b96", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The IDs 3 of the selected images were\nstored for reference. 
", "mimetype": "text/plain", "start_char_idx": 23755, "end_char_idx": 23815, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9138c195-ffd6-471c-9b0f-35c6f2bda660": {"__data__": {"id_": "9138c195-ffd6-471c-9b0f-35c6f2bda660", "embedding": null, "metadata": {"window": "For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. ", "original_text": "The image processing produced almost 4k crops. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d0f9b502-fc8d-4f2b-a99b-eb674e5affcf", "node_type": "1", "metadata": {"window": "Common errors include missed detections of small moths, and wings that are con-\nsidered as separate objects; tweaking SAM\u2019s parameters to improve on one problem tended to\nmake the other worse. For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n", "original_text": "The IDs 3 of the selected images were\nstored for reference. "}, "hash": "efba16c950483fb52124336922848c91657d709614cf56fdf6c794348d248997", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4a65a2da-9355-4c04-8c5f-73de32763679", "node_type": "1", "metadata": {"window": "Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. 
Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n", "original_text": "Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. "}, "hash": "17f46f4fe0361cc762a60efa69be1a0ebf26fa76a437f4fae39ea3c7801d9a74", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The image processing produced almost 4k crops. ", "mimetype": "text/plain", "start_char_idx": 23815, "end_char_idx": 23862, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4a65a2da-9355-4c04-8c5f-73de32763679": {"__data__": {"id_": "4a65a2da-9355-4c04-8c5f-73de32763679", "embedding": null, "metadata": {"window": "Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n", "original_text": "Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9138c195-ffd6-471c-9b0f-35c6f2bda660", "node_type": "1", "metadata": {"window": "For the following tasks, it was important not to miss the small moths, in\norder not to bias the final dataset towards larger moths. Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. 
Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. ", "original_text": "The image processing produced almost 4k crops. "}, "hash": "41fdc068ddf776f9cf645295569cf1a26e8e2061218e5e12f7a1ed10aac50b96", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9dc5477f-4a3b-49d0-a325-d1d8faa6a08b", "node_type": "1", "metadata": {"window": "A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. ", "original_text": "Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. "}, "hash": "9ca545e7fa29a2f3c6a803bb68c2a98fdf9c84752b07e7039a744c6f349dde92", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. ", "mimetype": "text/plain", "start_char_idx": 23862, "end_char_idx": 24020, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9dc5477f-4a3b-49d0-a325-d1d8faa6a08b": {"__data__": {"id_": "9dc5477f-4a3b-49d0-a325-d1d8faa6a08b", "embedding": null, "metadata": {"window": "A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. ", "original_text": "Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4a65a2da-9355-4c04-8c5f-73de32763679", "node_type": "1", "metadata": {"window": "Hence, the parameters were tweaked\naccordingly.\n A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n", "original_text": "Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. "}, "hash": "17f46f4fe0361cc762a60efa69be1a0ebf26fa76a437f4fae39ea3c7801d9a74", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fbc99444-b30e-47a3-ac23-14e533c66fa4", "node_type": "1", "metadata": {"window": "The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. ", "original_text": "The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n"}, "hash": "e7cf1ee1854c4286283187b5b55eec555805a85b119d6030d6e6183d13e8103c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. ", "mimetype": "text/plain", "start_char_idx": 24020, "end_char_idx": 24131, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fbc99444-b30e-47a3-ac23-14e533c66fa4": {"__data__": {"id_": "fbc99444-b30e-47a3-ac23-14e533c66fa4", "embedding": null, "metadata": {"window": "The IDs 3 of the selected images were\nstored for reference. 
The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. ", "original_text": "The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9dc5477f-4a3b-49d0-a325-d1d8faa6a08b", "node_type": "1", "metadata": {"window": "A diverse collection of nearly 300 images was assembled from five different locations (Den-\nmark, Vermont, Quebec, Panama and Newfoundland). The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. ", "original_text": "Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. "}, "hash": "9ca545e7fa29a2f3c6a803bb68c2a98fdf9c84752b07e7039a744c6f349dde92", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bf0828b5-f174-4c8d-942d-f2db7aa26b4a", "node_type": "1", "metadata": {"window": "The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. 
This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. ", "original_text": "Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. "}, "hash": "c88c6c6101952a5a3c9fbfa084ce1335196a7cc131f890d974be3cbacf433edd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n", "mimetype": "text/plain", "start_char_idx": 24131, "end_char_idx": 24294, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bf0828b5-f174-4c8d-942d-f2db7aa26b4a": {"__data__": {"id_": "bf0828b5-f174-4c8d-942d-f2db7aa26b4a", "embedding": null, "metadata": {"window": "The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. ", "original_text": "Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fbc99444-b30e-47a3-ac23-14e533c66fa4", "node_type": "1", "metadata": {"window": "The IDs 3 of the selected images were\nstored for reference. The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. 
", "original_text": "The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n"}, "hash": "e7cf1ee1854c4286283187b5b55eec555805a85b119d6030d6e6183d13e8103c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f238aee1-b43d-458f-ae16-81150b5dd412", "node_type": "1", "metadata": {"window": "Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. ", "original_text": "Hence, there is still need for manual review.\n"}, "hash": "bc94241f1bd7ff3ab034af66cd08498aa61169099dbf9cb997fab53738761d4d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. ", "mimetype": "text/plain", "start_char_idx": 24294, "end_char_idx": 24385, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f238aee1-b43d-458f-ae16-81150b5dd412": {"__data__": {"id_": "f238aee1-b43d-458f-ae16-81150b5dd412", "embedding": null, "metadata": {"window": "Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. 
", "original_text": "Hence, there is still need for manual review.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bf0828b5-f174-4c8d-942d-f2db7aa26b4a", "node_type": "1", "metadata": {"window": "The image processing produced almost 4k crops. Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. ", "original_text": "Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. "}, "hash": "c88c6c6101952a5a3c9fbfa084ce1335196a7cc131f890d974be3cbacf433edd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b8236aa4-0fa4-4a03-8812-213d0e9dab58", "node_type": "1", "metadata": {"window": "Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. ", "original_text": "Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. "}, "hash": "cbd0d38a59af4d4f611d1b39cb1f7c6f7672cdeb06d68d65208f7371ae614f9e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Hence, there is still need for manual review.\n", "mimetype": "text/plain", "start_char_idx": 24385, "end_char_idx": 24431, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b8236aa4-0fa4-4a03-8812-213d0e9dab58": {"__data__": {"id_": "b8236aa4-0fa4-4a03-8812-213d0e9dab58", "embedding": null, "metadata": {"window": "Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. 
The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. ", "original_text": "Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f238aee1-b43d-458f-ae16-81150b5dd412", "node_type": "1", "metadata": {"window": "Each crop of a detected\nobject consists of two arrays: one array is a crop of the image, the other is a boolean mask indi-\ncating the object inside the crop. Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. ", "original_text": "Hence, there is still need for manual review.\n"}, "hash": "bc94241f1bd7ff3ab034af66cd08498aa61169099dbf9cb997fab53738761d4d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "af402f64-e59f-4d61-b4d2-9f44e7591806", "node_type": "1", "metadata": {"window": "The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. 
", "original_text": "This is because the only required operation is to delete wrong detections. "}, "hash": "d21e1c74f245937d8cc2ebd28855ab6a28f7c3b65e397bf79f9c3aec2ec1c05f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. ", "mimetype": "text/plain", "start_char_idx": 24431, "end_char_idx": 24548, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "af402f64-e59f-4d61-b4d2-9f44e7591806": {"__data__": {"id_": "af402f64-e59f-4d61-b4d2-9f44e7591806", "embedding": null, "metadata": {"window": "The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. ", "original_text": "This is because the only required operation is to delete wrong detections. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b8236aa4-0fa4-4a03-8812-213d0e9dab58", "node_type": "1", "metadata": {"window": "Each array is saved in the npy format, NumPy\u2019s standard for\npersisting a single arbitrary NumPy array on disk. The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. ", "original_text": "Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. "}, "hash": "cbd0d38a59af4d4f611d1b39cb1f7c6f7672cdeb06d68d65208f7371ae614f9e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9a29e474-b92b-4d2d-9eab-bf29e5dac261", "node_type": "1", "metadata": {"window": "Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. 
Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. ", "original_text": "All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. "}, "hash": "bab726d818ce76f58964328894f244d0519365cd0991b28bb600e992e7d2c567", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "This is because the only required operation is to delete wrong detections. ", "mimetype": "text/plain", "start_char_idx": 24548, "end_char_idx": 24623, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9a29e474-b92b-4d2d-9eab-bf29e5dac261": {"__data__": {"id_": "9a29e474-b92b-4d2d-9eab-bf29e5dac261", "embedding": null, "metadata": {"window": "Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. ", "original_text": "All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "af402f64-e59f-4d61-b4d2-9f44e7591806", "node_type": "1", "metadata": {"window": "The .npy files are zipped in a .npz file, with\na naming convention that allows to pair crops and their corresponding mask together when\nreading the file later on.\n Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. 
The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. ", "original_text": "This is because the only required operation is to delete wrong detections. "}, "hash": "d21e1c74f245937d8cc2ebd28855ab6a28f7c3b65e397bf79f9c3aec2ec1c05f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "41a7601e-f84b-4cf6-a699-ba87c0f930f2", "node_type": "1", "metadata": {"window": "Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n", "original_text": "The tool developed for this task is a GUI application, presented in Fig.2.5. "}, "hash": "5d3f967ca158f96b6afae32fbd388bdd8035e32f8fbc67cb8e407b8dcc3a19cc", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. ", "mimetype": "text/plain", "start_char_idx": 24623, "end_char_idx": 24727, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "41a7601e-f84b-4cf6-a699-ba87c0f930f2": {"__data__": {"id_": "41a7601e-f84b-4cf6-a699-ba87c0f930f2", "embedding": null, "metadata": {"window": "Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n", "original_text": "The tool developed for this task is a GUI application, presented in Fig.2.5. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9a29e474-b92b-4d2d-9eab-bf29e5dac261", "node_type": "1", "metadata": {"window": "Manual review of the segmented objects\nAs stated above, SAM\u2019s predictions are not perfect. Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. ", "original_text": "All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. "}, "hash": "bab726d818ce76f58964328894f244d0519365cd0991b28bb600e992e7d2c567", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "90caea20-c716-4371-89e1-f4ac63612885", "node_type": "1", "metadata": {"window": "Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. ", "original_text": "The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. "}, "hash": "4b11838887fab976c5b289d37697637d34a32bb9d37726e52e6d342bb1616ab5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The tool developed for this task is a GUI application, presented in Fig.2.5. ", "mimetype": "text/plain", "start_char_idx": 24727, "end_char_idx": 24804, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "90caea20-c716-4371-89e1-f4ac63612885": {"__data__": {"id_": "90caea20-c716-4371-89e1-f4ac63612885", "embedding": null, "metadata": {"window": "Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. 
All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. ", "original_text": "The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "41a7601e-f84b-4cf6-a699-ba87c0f930f2", "node_type": "1", "metadata": {"window": "Hence, there is still need for manual review.\n Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n", "original_text": "The tool developed for this task is a GUI application, presented in Fig.2.5. "}, "hash": "5d3f967ca158f96b6afae32fbd388bdd8035e32f8fbc67cb8e407b8dcc3a19cc", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ab6286ca-6f9f-4c76-82f5-7129afd57a7c", "node_type": "1", "metadata": {"window": "This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. 
The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n", "original_text": "Bad\ncrops can be deleted in a split second by pressing the delete key. "}, "hash": "c8cc5b995208f6ee629f43a294db1a09fdeffaed02472fd73b1d7d7808e58d25", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. ", "mimetype": "text/plain", "start_char_idx": 24804, "end_char_idx": 24901, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ab6286ca-6f9f-4c76-82f5-7129afd57a7c": {"__data__": {"id_": "ab6286ca-6f9f-4c76-82f5-7129afd57a7c", "embedding": null, "metadata": {"window": "This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n", "original_text": "Bad\ncrops can be deleted in a split second by pressing the delete key. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "90caea20-c716-4371-89e1-f4ac63612885", "node_type": "1", "metadata": {"window": "Luckily, with the right tools, manual review is very fast: critically, it ismuch faster than drawing\nbounding boxes. This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. ", "original_text": "The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. 
"}, "hash": "4b11838887fab976c5b289d37697637d34a32bb9d37726e52e6d342bb1616ab5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1d7adba4-1b08-45eb-993e-19df387b769d", "node_type": "1", "metadata": {"window": "All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n", "original_text": "When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. "}, "hash": "96d7fd976f34c0a6949bfd2ea16bfa9c3b03c40c6da9f4934abb29245dea4aa4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Bad\ncrops can be deleted in a split second by pressing the delete key. ", "mimetype": "text/plain", "start_char_idx": 24901, "end_char_idx": 24972, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1d7adba4-1b08-45eb-993e-19df387b769d": {"__data__": {"id_": "1d7adba4-1b08-45eb-993e-19df387b769d", "embedding": null, "metadata": {"window": "All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n", "original_text": "When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ab6286ca-6f9f-4c76-82f5-7129afd57a7c", "node_type": "1", "metadata": {"window": "This is because the only required operation is to delete wrong detections. All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n", "original_text": "Bad\ncrops can be deleted in a split second by pressing the delete key. "}, "hash": "c8cc5b995208f6ee629f43a294db1a09fdeffaed02472fd73b1d7d7808e58d25", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d5c8ca45-9cae-4545-b92a-5a54ae6256d5", "node_type": "1", "metadata": {"window": "The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. ", "original_text": "If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n"}, "hash": "cf4e84891037ddb961ff6720802c1fa507ba94750157c7c6fbabcb6d3cf942e5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. 
", "mimetype": "text/plain", "start_char_idx": 24972, "end_char_idx": 25141, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d5c8ca45-9cae-4545-b92a-5a54ae6256d5": {"__data__": {"id_": "d5c8ca45-9cae-4545-b92a-5a54ae6256d5", "embedding": null, "metadata": {"window": "The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. ", "original_text": "If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1d7adba4-1b08-45eb-993e-19df387b769d", "node_type": "1", "metadata": {"window": "All\ndetections are perfectly tight around the object, so there is no need to adjust the bounding\nboxes. The tool developed for this task is a GUI application, presented in Fig.2.5. The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n", "original_text": "When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. "}, "hash": "96d7fd976f34c0a6949bfd2ea16bfa9c3b03c40c6da9f4934abb29245dea4aa4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "01188e94-90f7-46cb-bd85-aef6c0902393", "node_type": "1", "metadata": {"window": "The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. 
Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. ", "original_text": "11\n\ndialog box appears to ask whether to save the changes or not. "}, "hash": "5a2840a3332b9bfb702267b4ee4fc2c9dbc0bc607b9d7075f29fb29e348a4f68", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n", "mimetype": "text/plain", "start_char_idx": 25141, "end_char_idx": 25296, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "01188e94-90f7-46cb-bd85-aef6c0902393": {"__data__": {"id_": "01188e94-90f7-46cb-bd85-aef6c0902393", "embedding": null, "metadata": {"window": "The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. ", "original_text": "11\n\ndialog box appears to ask whether to save the changes or not. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d5c8ca45-9cae-4545-b92a-5a54ae6256d5", "node_type": "1", "metadata": {"window": "The tool developed for this task is a GUI application, presented in Fig.2.5. 
The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. ", "original_text": "If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n"}, "hash": "cf4e84891037ddb961ff6720802c1fa507ba94750157c7c6fbabcb6d3cf942e5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "feb82ae1-21c7-461a-9656-5530dab86794", "node_type": "1", "metadata": {"window": "Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. ", "original_text": "The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n"}, "hash": "d24e3b6aa2b35a4fbbaa124e1db781bd879ca94b04a888b1191d92905e20f5b7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "11\n\ndialog box appears to ask whether to save the changes or not. ", "mimetype": "text/plain", "start_char_idx": 25296, "end_char_idx": 25362, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "feb82ae1-21c7-461a-9656-5530dab86794": {"__data__": {"id_": "feb82ae1-21c7-461a-9656-5530dab86794", "embedding": null, "metadata": {"window": "Bad\ncrops can be deleted in a split second by pressing the delete key. 
When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. ", "original_text": "The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "01188e94-90f7-46cb-bd85-aef6c0902393", "node_type": "1", "metadata": {"window": "The user\ncan swipe through the crops read from the .npz file with the left and right arrow keys. Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. ", "original_text": "11\n\ndialog box appears to ask whether to save the changes or not. "}, "hash": "5a2840a3332b9bfb702267b4ee4fc2c9dbc0bc607b9d7075f29fb29e348a4f68", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "94ba5eab-52d1-4f7a-835d-0706fac6d63f", "node_type": "1", "metadata": {"window": "When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. 
The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. ", "original_text": "Figure 2.5: GUI app to review SAM\u2019s crops.\n"}, "hash": "39a784054992700c995ec3d3b14e864a4b3e80ea4005947b5ccdb9d5d9a0ad42", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n", "mimetype": "text/plain", "start_char_idx": 25362, "end_char_idx": 25479, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "94ba5eab-52d1-4f7a-835d-0706fac6d63f": {"__data__": {"id_": "94ba5eab-52d1-4f7a-835d-0706fac6d63f", "embedding": null, "metadata": {"window": "When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. ", "original_text": "Figure 2.5: GUI app to review SAM\u2019s crops.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "feb82ae1-21c7-461a-9656-5530dab86794", "node_type": "1", "metadata": {"window": "Bad\ncrops can be deleted in a split second by pressing the delete key. When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. 
If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. ", "original_text": "The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n"}, "hash": "d24e3b6aa2b35a4fbbaa124e1db781bd879ca94b04a888b1191d92905e20f5b7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6cde88d1-71f6-4571-9f86-d3336b290a1e", "node_type": "1", "metadata": {"window": "If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). ", "original_text": "Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. "}, "hash": "504c1368b38681667dd53f291ab25b05bf799f7c7659fcc9a9e8243934e4a8e9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Figure 2.5: GUI app to review SAM\u2019s crops.\n", "mimetype": "text/plain", "start_char_idx": 25479, "end_char_idx": 25522, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6cde88d1-71f6-4571-9f86-d3336b290a1e": {"__data__": {"id_": "6cde88d1-71f6-4571-9f86-d3336b290a1e", "embedding": null, "metadata": {"window": "If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. 
The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). ", "original_text": "Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "94ba5eab-52d1-4f7a-835d-0706fac6d63f", "node_type": "1", "metadata": {"window": "When the first crop is deleted,\nthe \"Save as\" and \"Discard\" buttons are activated, such that changes can be saved (under the\ndesired filename) or discarded at any time. If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. ", "original_text": "Figure 2.5: GUI app to review SAM\u2019s crops.\n"}, "hash": "39a784054992700c995ec3d3b14e864a4b3e80ea4005947b5ccdb9d5d9a0ad42", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3d49cc93-23e5-4276-b0e5-60a91fabaffa", "node_type": "1", "metadata": {"window": "11\n\ndialog box appears to ask whether to save the changes or not. 
The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. ", "original_text": "For each\nnew image, moths are pasted one after the other at random locations. "}, "hash": "98d332448219ef2586e78bb57099f1ceee7f8a0eac3da0bce42be25e6c65b951", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. ", "mimetype": "text/plain", "start_char_idx": 25522, "end_char_idx": 25805, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3d49cc93-23e5-4276-b0e5-60a91fabaffa": {"__data__": {"id_": "3d49cc93-23e5-4276-b0e5-60a91fabaffa", "embedding": null, "metadata": {"window": "11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. 
", "original_text": "For each\nnew image, moths are pasted one after the other at random locations. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6cde88d1-71f6-4571-9f86-d3336b290a1e", "node_type": "1", "metadata": {"window": "If the window is closed with unsaved changes, a\n3Regrettably, there is actually no guarantee of uniqueness of the image filename across the whole project.\n 11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). ", "original_text": "Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. "}, "hash": "504c1368b38681667dd53f291ab25b05bf799f7c7659fcc9a9e8243934e4a8e9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ac6f6fbc-9e6b-4b31-bd0c-e289765b4457", "node_type": "1", "metadata": {"window": "The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. 
A synthetic image is displayed in Fig2.6 as an example.\n", "original_text": "Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. "}, "hash": "fae908cd1c36c8ce0a2c3b3d9da83837915a1ea1c1e994828d588a9d24f3b91b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "For each\nnew image, moths are pasted one after the other at random locations. ", "mimetype": "text/plain", "start_char_idx": 25805, "end_char_idx": 25883, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ac6f6fbc-9e6b-4b31-bd0c-e289765b4457": {"__data__": {"id_": "ac6f6fbc-9e6b-4b31-bd0c-e289765b4457", "embedding": null, "metadata": {"window": "The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n", "original_text": "Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3d49cc93-23e5-4276-b0e5-60a91fabaffa", "node_type": "1", "metadata": {"window": "11\n\ndialog box appears to ask whether to save the changes or not. The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. 
Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. ", "original_text": "For each\nnew image, moths are pasted one after the other at random locations. "}, "hash": "98d332448219ef2586e78bb57099f1ceee7f8a0eac3da0bce42be25e6c65b951", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e97c892e-f6f6-4047-a7ae-f89a861ffc31", "node_type": "1", "metadata": {"window": "Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. ", "original_text": "Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. "}, "hash": "efbb0e0f54b01004ece55e98ec8e5949b7eef7b45bec1fd5931511de70a82557", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. ", "mimetype": "text/plain", "start_char_idx": 25883, "end_char_idx": 26039, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e97c892e-f6f6-4047-a7ae-f89a861ffc31": {"__data__": {"id_": "e97c892e-f6f6-4047-a7ae-f89a861ffc31", "embedding": null, "metadata": {"window": "Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. 
Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. ", "original_text": "Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ac6f6fbc-9e6b-4b31-bd0c-e289765b4457", "node_type": "1", "metadata": {"window": "The user can also play with the\noverlay that appears on the segmented object, by making it more or less transparent.\n Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n", "original_text": "Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. "}, "hash": "fae908cd1c36c8ce0a2c3b3d9da83837915a1ea1c1e994828d588a9d24f3b91b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "db6c83a3-317a-4e4e-a31a-35c10b8aa6f8", "node_type": "1", "metadata": {"window": "Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. 
Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. ", "original_text": "Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). "}, "hash": "68d0e4143ca5f6d8ae439721f482cb3da1acbd04b2229f9c0c7760b0dddfc761", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. ", "mimetype": "text/plain", "start_char_idx": 26039, "end_char_idx": 26127, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "db6c83a3-317a-4e4e-a31a-35c10b8aa6f8": {"__data__": {"id_": "db6c83a3-317a-4e4e-a31a-35c10b8aa6f8", "embedding": null, "metadata": {"window": "Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. ", "original_text": "Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e97c892e-f6f6-4047-a7ae-f89a861ffc31", "node_type": "1", "metadata": {"window": "Figure 2.5: GUI app to review SAM\u2019s crops.\n Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. ", "original_text": "Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. "}, "hash": "efbb0e0f54b01004ece55e98ec8e5949b7eef7b45bec1fd5931511de70a82557", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e1676b5a-d733-447d-b786-492ee36fb73d", "node_type": "1", "metadata": {"window": "For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. 
", "original_text": "To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. "}, "hash": "a5912e0c7750421f090aaa55f32d8f1ed5a305acf0dd37de86b3e50e5cf296a4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). ", "mimetype": "text/plain", "start_char_idx": 26127, "end_char_idx": 26236, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e1676b5a-d733-447d-b786-492ee36fb73d": {"__data__": {"id_": "e1676b5a-d733-447d-b786-492ee36fb73d", "embedding": null, "metadata": {"window": "For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. ", "original_text": "To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "db6c83a3-317a-4e4e-a31a-35c10b8aa6f8", "node_type": "1", "metadata": {"window": "Image synthesis\nThe command line script developed to synthesize the new images has four main inputs: the\n.npz file with the crops, the path to a folder with background images, the number of new im-\nages to create from each background, and the number of crops to paste on each image. For each\nnew image, moths are pasted one after the other at random locations. 
Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. ", "original_text": "Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). "}, "hash": "68d0e4143ca5f6d8ae439721f482cb3da1acbd04b2229f9c0c7760b0dddfc761", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "eb932f31-ca9b-4aa3-9929-5fd821dd6bb8", "node_type": "1", "metadata": {"window": "Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. ", "original_text": "A synthetic image is displayed in Fig2.6 as an example.\n"}, "hash": "ede3e9b453ec43d67b4e218e11a3b6c9c18904aba8741212f27bbac5c6a0a3d6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. 
", "mimetype": "text/plain", "start_char_idx": 26236, "end_char_idx": 26592, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "eb932f31-ca9b-4aa3-9929-5fd821dd6bb8": {"__data__": {"id_": "eb932f31-ca9b-4aa3-9929-5fd821dd6bb8", "embedding": null, "metadata": {"window": "Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. ", "original_text": "A synthetic image is displayed in Fig2.6 as an example.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e1676b5a-d733-447d-b786-492ee36fb73d", "node_type": "1", "metadata": {"window": "For each\nnew image, moths are pasted one after the other at random locations. Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. 
", "original_text": "To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. "}, "hash": "a5912e0c7750421f090aaa55f32d8f1ed5a305acf0dd37de86b3e50e5cf296a4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7fba77f6-783d-4784-b197-98359743af41", "node_type": "1", "metadata": {"window": "Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. ", "original_text": "With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. "}, "hash": "f9417e4b8e545ef2090e8d18173fae6763a909ea76cf1077c9d051539b735355", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A synthetic image is displayed in Fig2.6 as an example.\n", "mimetype": "text/plain", "start_char_idx": 26592, "end_char_idx": 26648, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7fba77f6-783d-4784-b197-98359743af41": {"__data__": {"id_": "7fba77f6-783d-4784-b197-98359743af41", "embedding": null, "metadata": {"window": "Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. 
Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. ", "original_text": "With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "eb932f31-ca9b-4aa3-9929-5fd821dd6bb8", "node_type": "1", "metadata": {"window": "Collisions are taken care\nof: if a moth overlaps with one of the already pasted moths by more than a certain threshold4,\na new random position is selected. Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. ", "original_text": "A synthetic image is displayed in Fig2.6 as an example.\n"}, "hash": "ede3e9b453ec43d67b4e218e11a3b6c9c18904aba8741212f27bbac5c6a0a3d6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "24b373cc-49ce-47d6-a7e1-148bb7a8cf2d", "node_type": "1", "metadata": {"window": "Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. 
Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n", "original_text": "From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. "}, "hash": "680fdf474d78a67a5fbda575a3ea8d885d501ac5e2f5cc101e1a2266683d7ab6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. ", "mimetype": "text/plain", "start_char_idx": 26648, "end_char_idx": 26754, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "24b373cc-49ce-47d6-a7e1-148bb7a8cf2d": {"__data__": {"id_": "24b373cc-49ce-47d6-a7e1-148bb7a8cf2d", "embedding": null, "metadata": {"window": "Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n", "original_text": "From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7fba77f6-783d-4784-b197-98359743af41", "node_type": "1", "metadata": {"window": "Similarly, moths are not allowed to be out of the image by\nmore than a certain portion. Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). 
To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. ", "original_text": "With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. "}, "hash": "f9417e4b8e545ef2090e8d18173fae6763a909ea76cf1077c9d051539b735355", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "81b8fcaa-24b0-4e36-a70f-36321c55cc4b", "node_type": "1", "metadata": {"window": "To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. ", "original_text": "For comparison, manual annotation of that many\nimages is expected to take at least a week. "}, "hash": "33a61cdde13d16f7afaac5161be08618b57e83cd58eed0a63522067078e6fb54", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. 
", "mimetype": "text/plain", "start_char_idx": 26754, "end_char_idx": 26878, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "81b8fcaa-24b0-4e36-a70f-36321c55cc4b": {"__data__": {"id_": "81b8fcaa-24b0-4e36-a70f-36321c55cc4b", "embedding": null, "metadata": {"window": "To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. ", "original_text": "For comparison, manual annotation of that many\nimages is expected to take at least a week. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "24b373cc-49ce-47d6-a7e1-148bb7a8cf2d", "node_type": "1", "metadata": {"window": "Two types of simple augmentations are used: rotations (90\u25e6, 180\u25e6,\n270\u25e6) and flips (horizontal and vertical). To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. 
Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n", "original_text": "From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. "}, "hash": "680fdf474d78a67a5fbda575a3ea8d885d501ac5e2f5cc101e1a2266683d7ab6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "20cb007b-35b8-4e3d-9872-83a789531773", "node_type": "1", "metadata": {"window": "A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n", "original_text": "Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. "}, "hash": "7a1c5ef4a255b2f279c94f5cd7dce1af41b5592802cd7bc1fc0713505c84c439", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "For comparison, manual annotation of that many\nimages is expected to take at least a week. ", "mimetype": "text/plain", "start_char_idx": 26878, "end_char_idx": 26969, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "20cb007b-35b8-4e3d-9872-83a789531773": {"__data__": {"id_": "20cb007b-35b8-4e3d-9872-83a789531773", "embedding": null, "metadata": {"window": "A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. 
Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n", "original_text": "Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "81b8fcaa-24b0-4e36-a70f-36321c55cc4b", "node_type": "1", "metadata": {"window": "To maximize the diversity of the augmented crops,\nthe transformations are applied cyclically: first, the moths are pasted without augmentations;\nwhen all the moths in the collection have been pasted once, the algorithm cycles again through\nthe collection, and a first set of augmentations is applied; at the second cycle, another set is\nchosen, and so on. A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. ", "original_text": "For comparison, manual annotation of that many\nimages is expected to take at least a week. "}, "hash": "33a61cdde13d16f7afaac5161be08618b57e83cd58eed0a63522067078e6fb54", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4ec8e19e-fdff-485e-8be3-d33993fce252", "node_type": "1", "metadata": {"window": "With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. 
Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. ", "original_text": "The results will be presented more in details in the corresponding section. "}, "hash": "34cc6aecb783c443c6d07af1f767027e9e27dd9bcc528ca44fac293e97ce4b76", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. ", "mimetype": "text/plain", "start_char_idx": 26969, "end_char_idx": 27121, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4ec8e19e-fdff-485e-8be3-d33993fce252": {"__data__": {"id_": "4ec8e19e-fdff-485e-8be3-d33993fce252", "embedding": null, "metadata": {"window": "With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. ", "original_text": "The results will be presented more in details in the corresponding section. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "20cb007b-35b8-4e3d-9872-83a789531773", "node_type": "1", "metadata": {"window": "A synthetic image is displayed in Fig2.6 as an example.\n With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. 
Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n", "original_text": "Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. "}, "hash": "7a1c5ef4a255b2f279c94f5cd7dce1af41b5592802cd7bc1fc0713505c84c439", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "47fd4105-9146-4e1f-b55b-8ee60eb703eb", "node_type": "1", "metadata": {"window": "From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. ", "original_text": "Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n"}, "hash": "bb4deb038b29fa83228363daebc2a3a69db9074b86d61159d184c059d117eabf", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The results will be presented more in details in the corresponding section. ", "mimetype": "text/plain", "start_char_idx": 27121, "end_char_idx": 27197, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "47fd4105-9146-4e1f-b55b-8ee60eb703eb": {"__data__": {"id_": "47fd4105-9146-4e1f-b55b-8ee60eb703eb", "embedding": null, "metadata": {"window": "From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. 
For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. ", "original_text": "Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4ec8e19e-fdff-485e-8be3-d33993fce252", "node_type": "1", "metadata": {"window": "With the GUI described before, a collection of 2600 clean crops was created, during a single day\nof work. From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. ", "original_text": "The results will be presented more in details in the corresponding section. "}, "hash": "34cc6aecb783c443c6d07af1f767027e9e27dd9bcc528ca44fac293e97ce4b76", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5f2b5735-1ff7-4060-b09d-a23956e48381", "node_type": "1", "metadata": {"window": "For comparison, manual annotation of that many\nimages is expected to take at least a week. 
Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. ", "original_text": "Figure 2.6: A synthetic image. "}, "hash": "c112e70a495a6a94efe7739808758bbb563e5214ed34c4710db1b4cfc2387ccc", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n", "mimetype": "text/plain", "start_char_idx": 27197, "end_char_idx": 27607, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5f2b5735-1ff7-4060-b09d-a23956e48381": {"__data__": {"id_": "5f2b5735-1ff7-4060-b09d-a23956e48381", "embedding": null, "metadata": {"window": "For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. ", "original_text": "Figure 2.6: A synthetic image. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "47fd4105-9146-4e1f-b55b-8ee60eb703eb", "node_type": "1", "metadata": {"window": "From these, using a collection of more than a hundred empty background images, a\ndataset of 5k images with was synthesized. For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. ", "original_text": "Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n"}, "hash": "bb4deb038b29fa83228363daebc2a3a69db9074b86d61159d184c059d117eabf", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c07ae06d-5414-46c2-a759-d048cd1f91c9", "node_type": "1", "metadata": {"window": "Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. 
", "original_text": "In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n"}, "hash": "f20d080df29787b985f4e98c2a2fe6ccce56bd6f22729956ad251296ebe8b047", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Figure 2.6: A synthetic image. ", "mimetype": "text/plain", "start_char_idx": 27607, "end_char_idx": 27638, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c07ae06d-5414-46c2-a759-d048cd1f91c9": {"__data__": {"id_": "c07ae06d-5414-46c2-a759-d048cd1f91c9", "embedding": null, "metadata": {"window": "Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. ", "original_text": "In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5f2b5735-1ff7-4060-b09d-a23956e48381", "node_type": "1", "metadata": {"window": "For comparison, manual annotation of that many\nimages is expected to take at least a week. Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. 
In torchvision,\nmany object-detection models are available off-the-shelf. ", "original_text": "Figure 2.6: A synthetic image. "}, "hash": "c112e70a495a6a94efe7739808758bbb563e5214ed34c4710db1b4cfc2387ccc", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "079d05e4-9356-420d-8533-711778f054a5", "node_type": "1", "metadata": {"window": "The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. ", "original_text": "2.3. "}, "hash": "958c9e7427a8836f17e1d133eb12e5b871ca8e6a097828785a6265c5e969f2a1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n", "mimetype": "text/plain", "start_char_idx": 27638, "end_char_idx": 27750, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "079d05e4-9356-420d-8533-711778f054a5": {"__data__": {"id_": "079d05e4-9356-420d-8533-711778f054a5", "embedding": null, "metadata": {"window": "The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. 
", "original_text": "2.3. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c07ae06d-5414-46c2-a759-d048cd1f91c9", "node_type": "1", "metadata": {"window": "Most importantly, it appeared that the bet paid\noff: models trained on synthetic data performed well on natural data \u2013better than the previ-\nous model. The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. ", "original_text": "In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n"}, "hash": "f20d080df29787b985f4e98c2a2fe6ccce56bd6f22729956ad251296ebe8b047", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d007f427-36c2-40a8-9b31-ed2ef439ac49", "node_type": "1", "metadata": {"window": "Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. ", "original_text": "M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. 
"}, "hash": "ef9eff1b3928bf20bba56ffcec6f8adc9b70b1157f9d16a7cfe4c0b9f4fd7e83", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "2.3. ", "mimetype": "text/plain", "start_char_idx": 22085, "end_char_idx": 22090, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d007f427-36c2-40a8-9b31-ed2ef439ac49": {"__data__": {"id_": "d007f427-36c2-40a8-9b31-ed2ef439ac49", "embedding": null, "metadata": {"window": "Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. ", "original_text": "M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "079d05e4-9356-420d-8533-711778f054a5", "node_type": "1", "metadata": {"window": "The results will be presented more in details in the corresponding section. Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. 
As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. ", "original_text": "2.3. "}, "hash": "958c9e7427a8836f17e1d133eb12e5b871ca8e6a097828785a6265c5e969f2a1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "52c10101-23a3-4cb0-86d6-6ddeed17061d", "node_type": "1", "metadata": {"window": "Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. ", "original_text": "In torchvision,\nmany object-detection models are available off-the-shelf. "}, "hash": "ece0a6994f09eab06af2c821c795de3b0cc0a804e2870b37d56292d5d4efbd52", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. ", "mimetype": "text/plain", "start_char_idx": 27755, "end_char_idx": 27881, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "52c10101-23a3-4cb0-86d6-6ddeed17061d": {"__data__": {"id_": "52c10101-23a3-4cb0-86d6-6ddeed17061d", "embedding": null, "metadata": {"window": "Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. ", "original_text": "In torchvision,\nmany object-detection models are available off-the-shelf. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d007f427-36c2-40a8-9b31-ed2ef439ac49", "node_type": "1", "metadata": {"window": "Lastly,\nit is worth noting one advantage of this technique: whenever a new deployment is created,\n4in terms of overlapping surface over moth area\n12\n\ncorresponding background images can be used with the existing collection of segmented in-\nsects to update the training dataset; this way, models can quickly be made familiar with new\ndeployments backgrounds, which might be beneficial for the models\u2019 accuracy.\n Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. ", "original_text": "M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. "}, "hash": "ef9eff1b3928bf20bba56ffcec6f8adc9b70b1157f9d16a7cfe4c0b9f4fd7e83", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "30c91376-3c31-4fb6-9da7-bc245e3c2586", "node_type": "1", "metadata": {"window": "In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. ", "original_text": "A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. 
"}, "hash": "60de6f5866c443d58d1e4ae2fc92a3844f0b5cc4a673d5de7602af16887c1fcf", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In torchvision,\nmany object-detection models are available off-the-shelf. ", "mimetype": "text/plain", "start_char_idx": 27881, "end_char_idx": 27955, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "30c91376-3c31-4fb6-9da7-bc245e3c2586": {"__data__": {"id_": "30c91376-3c31-4fb6-9da7-bc245e3c2586", "embedding": null, "metadata": {"window": "In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. ", "original_text": "A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "52c10101-23a3-4cb0-86d6-6ddeed17061d", "node_type": "1", "metadata": {"window": "Figure 2.6: A synthetic image. In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. ", "original_text": "In torchvision,\nmany object-detection models are available off-the-shelf. 
"}, "hash": "ece0a6994f09eab06af2c821c795de3b0cc0a804e2870b37d56292d5d4efbd52", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fafda3f3-18f8-4366-b744-f92a4f01b495", "node_type": "1", "metadata": {"window": "2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. ", "original_text": "As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. "}, "hash": "14f2996ab178de094208f47ef622b66cab8cf96d9b9969d6366b424f4394ac7c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. ", "mimetype": "text/plain", "start_char_idx": 27955, "end_char_idx": 28123, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fafda3f3-18f8-4366-b744-f92a4f01b495": {"__data__": {"id_": "fafda3f3-18f8-4366-b744-f92a4f01b495", "embedding": null, "metadata": {"window": "2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. 
", "original_text": "As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "30c91376-3c31-4fb6-9da7-bc245e3c2586", "node_type": "1", "metadata": {"window": "In the last version of the synthetic dataset, moth density was\nincreased from twenty to thirty moths per image.\n 2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. ", "original_text": "A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. "}, "hash": "60de6f5866c443d58d1e4ae2fc92a3844f0b5cc4a673d5de7602af16887c1fcf", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a6abbff5-2dca-4d09-a566-103a46b86814", "node_type": "1", "metadata": {"window": "M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. ", "original_text": "Initially,\nefforts were pursued to obtain a SSDlite model with good performance. 
"}, "hash": "58448b481266590ef2d2dee303b9938ba08677dc205d64cf71eeba32b79769d4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. ", "mimetype": "text/plain", "start_char_idx": 28123, "end_char_idx": 28353, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a6abbff5-2dca-4d09-a566-103a46b86814": {"__data__": {"id_": "a6abbff5-2dca-4d09-a566-103a46b86814", "embedding": null, "metadata": {"window": "M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. ", "original_text": "Initially,\nefforts were pursued to obtain a SSDlite model with good performance. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fafda3f3-18f8-4366-b744-f92a4f01b495", "node_type": "1", "metadata": {"window": "2.3. M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. 
While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. ", "original_text": "As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. "}, "hash": "14f2996ab178de094208f47ef622b66cab8cf96d9b9969d6366b424f4394ac7c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "524b05d7-cffd-4276-951a-cdacdaf9cac2", "node_type": "1", "metadata": {"window": "In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. ", "original_text": "To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. "}, "hash": "c47c6ea1014fe2da59c61613443363495b63556099d2788e21327c63c750eaf4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Initially,\nefforts were pursued to obtain a SSDlite model with good performance. ", "mimetype": "text/plain", "start_char_idx": 28353, "end_char_idx": 28434, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "524b05d7-cffd-4276-951a-cdacdaf9cac2": {"__data__": {"id_": "524b05d7-cffd-4276-951a-cdacdaf9cac2", "embedding": null, "metadata": {"window": "In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. 
While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. ", "original_text": "To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a6abbff5-2dca-4d09-a566-103a46b86814", "node_type": "1", "metadata": {"window": "M ODEL ARCHITECTURE AND TRAINING RECIPE\nSSDlite\nTo improve the speed of the model, changes in the architecture are necessary. In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. ", "original_text": "Initially,\nefforts were pursued to obtain a SSDlite model with good performance. "}, "hash": "58448b481266590ef2d2dee303b9938ba08677dc205d64cf71eeba32b79769d4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0eb440ad-0ad1-4b53-8d52-cace16ab2cf6", "node_type": "1", "metadata": {"window": "A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. 
When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. ", "original_text": "2.7), and the random crops and\nrandom horizontal flips data augmentations. "}, "hash": "e8f5612ae4e7992383504c5f082ab9405a8c6af6ea8588e0849df532525636a6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. ", "mimetype": "text/plain", "start_char_idx": 28434, "end_char_idx": 28617, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0eb440ad-0ad1-4b53-8d52-cace16ab2cf6": {"__data__": {"id_": "0eb440ad-0ad1-4b53-8d52-cace16ab2cf6", "embedding": null, "metadata": {"window": "A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. ", "original_text": "2.7), and the random crops and\nrandom horizontal flips data augmentations. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "524b05d7-cffd-4276-951a-cdacdaf9cac2", "node_type": "1", "metadata": {"window": "In torchvision,\nmany object-detection models are available off-the-shelf. A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. 
To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. ", "original_text": "To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. "}, "hash": "c47c6ea1014fe2da59c61613443363495b63556099d2788e21327c63c750eaf4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2bc37f51-7861-44bf-95a5-cacd69bbf16a", "node_type": "1", "metadata": {"window": "As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. ", "original_text": "While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. "}, "hash": "ad924989c4493b85ad1d2f47b1c6d48f674577c6c14a54339b235c2ec6195765", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "2.7), and the random crops and\nrandom horizontal flips data augmentations. ", "mimetype": "text/plain", "start_char_idx": 28617, "end_char_idx": 28692, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2bc37f51-7861-44bf-95a5-cacd69bbf16a": {"__data__": {"id_": "2bc37f51-7861-44bf-95a5-cacd69bbf16a", "embedding": null, "metadata": {"window": "As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 
2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. ", "original_text": "While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0eb440ad-0ad1-4b53-8d52-cace16ab2cf6", "node_type": "1", "metadata": {"window": "A very informative table gives an\noverview of the models, with their performance on the standard COCO benchmark dataset,\ntheir size, and a link to the training recipe. As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. ", "original_text": "2.7), and the random crops and\nrandom horizontal flips data augmentations. "}, "hash": "e8f5612ae4e7992383504c5f082ab9405a8c6af6ea8588e0849df532525636a6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9ea98d20-19fe-43b2-bcdc-7a64359de16f", "node_type": "1", "metadata": {"window": "Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. 
In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n", "original_text": "More precisely, the models were too sensitive to the score\nthreshold. "}, "hash": "5a0141b7408e924160d6d14115757b2885fc8b7c3b71c09af4e68a52ca16130c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. ", "mimetype": "text/plain", "start_char_idx": 28692, "end_char_idx": 28862, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9ea98d20-19fe-43b2-bcdc-7a64359de16f": {"__data__": {"id_": "9ea98d20-19fe-43b2-bcdc-7a64359de16f", "embedding": null, "metadata": {"window": "Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n", "original_text": "More precisely, the models were too sensitive to the score\nthreshold. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2bc37f51-7861-44bf-95a5-cacd69bbf16a", "node_type": "1", "metadata": {"window": "As stated before, the fastest available model \u2014by\nfar\u2014- is SSDlite, an adaptation of the Single Shot Detector [25] which was first briefly intro-\nduced on the MobileNetV2 paper [26] and later reused on the MobileNetV3 paper [27]. Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. 
While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. ", "original_text": "While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. "}, "hash": "ad924989c4493b85ad1d2f47b1c6d48f674577c6c14a54339b235c2ec6195765", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "25c6d623-7212-495f-9203-dcc218943be7", "node_type": "1", "metadata": {"window": "To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. ", "original_text": "When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. "}, "hash": "649f5691a91281371790d427382c49657515b79ea8bdc707003b3399700ab70e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "More precisely, the models were too sensitive to the score\nthreshold. ", "mimetype": "text/plain", "start_char_idx": 28862, "end_char_idx": 28932, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "25c6d623-7212-495f-9203-dcc218943be7": {"__data__": {"id_": "25c6d623-7212-495f-9203-dcc218943be7", "embedding": null, "metadata": {"window": "To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. 
In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. ", "original_text": "When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9ea98d20-19fe-43b2-bcdc-7a64359de16f", "node_type": "1", "metadata": {"window": "Initially,\nefforts were pursued to obtain a SSDlite model with good performance. To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n", "original_text": "More precisely, the models were too sensitive to the score\nthreshold. "}, "hash": "5a0141b7408e924160d6d14115757b2885fc8b7c3b71c09af4e68a52ca16130c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5dbe6947-ec88-4513-8499-5c02866e0b1a", "node_type": "1", "metadata": {"window": "2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. 
The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n", "original_text": "In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. "}, "hash": "42afe7c602c3f5c51a9d948cf0b6c89af5590c9ad67501ad4c7db0b68668369f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. ", "mimetype": "text/plain", "start_char_idx": 28932, "end_char_idx": 29037, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5dbe6947-ec88-4513-8499-5c02866e0b1a": {"__data__": {"id_": "5dbe6947-ec88-4513-8499-5c02866e0b1a", "embedding": null, "metadata": {"window": "2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n", "original_text": "In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "25c6d623-7212-495f-9203-dcc218943be7", "node_type": "1", "metadata": {"window": "To do so, following the\nindicated training recipe, some functionalities were added to the training script: the cosine an-\nnealing with warm-up epochs learning rate schedule (see Fig. 2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. 
", "original_text": "When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. "}, "hash": "649f5691a91281371790d427382c49657515b79ea8bdc707003b3399700ab70e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bcadae36-028e-45c6-a88b-3832c3fc3524", "node_type": "1", "metadata": {"window": "While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n", "original_text": "Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. "}, "hash": "a24ade5de6fd1740927f459978a37f638135f380be4eeaa2599634de61bb2067", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. ", "mimetype": "text/plain", "start_char_idx": 29037, "end_char_idx": 29165, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bcadae36-028e-45c6-a88b-3832c3fc3524": {"__data__": {"id_": "bcadae36-028e-45c6-a88b-3832c3fc3524", "embedding": null, "metadata": {"window": "While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n", "original_text": "Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5dbe6947-ec88-4513-8499-5c02866e0b1a", "node_type": "1", "metadata": {"window": "2.7), and the random crops and\nrandom horizontal flips data augmentations. While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n", "original_text": "In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. "}, "hash": "42afe7c602c3f5c51a9d948cf0b6c89af5590c9ad67501ad4c7db0b68668369f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "216f561c-5899-481d-bd8a-524430ade7d1", "node_type": "1", "metadata": {"window": "More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. ", "original_text": "In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n"}, "hash": "38d959fb897b9f4bfd2a1ccb30c1d8092787a42a96bbb728fc953eaa16216e83", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. 
", "mimetype": "text/plain", "start_char_idx": 29165, "end_char_idx": 29243, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "216f561c-5899-481d-bd8a-524430ade7d1": {"__data__": {"id_": "216f561c-5899-481d-bd8a-524430ade7d1", "embedding": null, "metadata": {"window": "More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. ", "original_text": "In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bcadae36-028e-45c6-a88b-3832c3fc3524", "node_type": "1", "metadata": {"window": "While each of these delivered improvements, the\nmodel performance \u2013as measured on the validation sets and logged to W&B for each train-\ning run\u2013 remained unsatisfactory. More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n", "original_text": "Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. "}, "hash": "a24ade5de6fd1740927f459978a37f638135f380be4eeaa2599634de61bb2067", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6e3eff4f-3d13-4bf4-a6ef-fce5a831e7f3", "node_type": "1", "metadata": {"window": "When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. 
Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. ", "original_text": "13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. "}, "hash": "efde7f6940d6e1a2a1850305634820945a9c0579cc1c9b61c85dfb9023fa4efe", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n", "mimetype": "text/plain", "start_char_idx": 29243, "end_char_idx": 29422, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6e3eff4f-3d13-4bf4-a6ef-fce5a831e7f3": {"__data__": {"id_": "6e3eff4f-3d13-4bf4-a6ef-fce5a831e7f3", "embedding": null, "metadata": {"window": "When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. ", "original_text": "13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "216f561c-5899-481d-bd8a-524430ade7d1", "node_type": "1", "metadata": {"window": "More precisely, the models were too sensitive to the score\nthreshold. When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. 
Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. ", "original_text": "In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n"}, "hash": "38d959fb897b9f4bfd2a1ccb30c1d8092787a42a96bbb728fc953eaa16216e83", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e45ad1e0-2af3-49d3-a545-2adf212a4013", "node_type": "1", "metadata": {"window": "In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. ", "original_text": "The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n"}, "hash": "ea0c3de769842d2549b49039863f52109e1dc1311184a7a8facb60f450ac3e6f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. ", "mimetype": "text/plain", "start_char_idx": 29422, "end_char_idx": 29510, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e45ad1e0-2af3-49d3-a545-2adf212a4013": {"__data__": {"id_": "e45ad1e0-2af3-49d3-a545-2adf212a4013", "embedding": null, "metadata": {"window": "In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. 
The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. ", "original_text": "The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6e3eff4f-3d13-4bf4-a6ef-fce5a831e7f3", "node_type": "1", "metadata": {"window": "When the synthetic datasets were introduced, the performance on the (synthetic)\nvalidation sets dropped. In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. ", "original_text": "13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. "}, "hash": "efde7f6940d6e1a2a1850305634820945a9c0579cc1c9b61c85dfb9023fa4efe", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ca134844-683a-495d-a69b-ad9c66a4040c", "node_type": "1", "metadata": {"window": "Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. 
Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. ", "original_text": "Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n"}, "hash": "9be38752d66d807290ad4523cdcb9d3d153b251d6a5b27e26f8d7af66cfdfe83", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n", "mimetype": "text/plain", "start_char_idx": 29510, "end_char_idx": 29584, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ca134844-683a-495d-a69b-ad9c66a4040c": {"__data__": {"id_": "ca134844-683a-495d-a69b-ad9c66a4040c", "embedding": null, "metadata": {"window": "Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. ", "original_text": "Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e45ad1e0-2af3-49d3-a545-2adf212a4013", "node_type": "1", "metadata": {"window": "In part, this was revealed to be due to the decrease in bounding box\nsize (caused by the tight fit), which is shown in Fig.2.8. Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. 
The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. ", "original_text": "The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n"}, "hash": "ea0c3de769842d2549b49039863f52109e1dc1311184a7a8facb60f450ac3e6f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dab772df-fad7-4e74-a241-96b18ada7f01", "node_type": "1", "metadata": {"window": "In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n", "original_text": "Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. "}, "hash": "fcfa057187f566bca94b810b17ec7126e2c6524da0157fc696ee449337518d7f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n", "mimetype": "text/plain", "start_char_idx": 29584, "end_char_idx": 29667, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dab772df-fad7-4e74-a241-96b18ada7f01": {"__data__": {"id_": "dab772df-fad7-4e74-a241-96b18ada7f01", "embedding": null, "metadata": {"window": "In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. 
The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n", "original_text": "Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ca134844-683a-495d-a69b-ad9c66a4040c", "node_type": "1", "metadata": {"window": "Still, the idea to use SSDlite as imple-\nmented in torchvision was abandoned. In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. ", "original_text": "Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n"}, "hash": "9be38752d66d807290ad4523cdcb9d3d153b251d6a5b27e26f8d7af66cfdfe83", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "96bd5939-4069-4e77-ab9c-3be9bb5f5b62", "node_type": "1", "metadata": {"window": "13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. 
As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. ", "original_text": "The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. "}, "hash": "ae7b814796f1d4934974a0aaa4fcc83c8fdfc52e811754f11d61db56f5053e58", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. ", "mimetype": "text/plain", "start_char_idx": 29667, "end_char_idx": 29797, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "96bd5939-4069-4e77-ab9c-3be9bb5f5b62": {"__data__": {"id_": "96bd5939-4069-4e77-ab9c-3be9bb5f5b62", "embedding": null, "metadata": {"window": "13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. ", "original_text": "The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dab772df-fad7-4e74-a241-96b18ada7f01", "node_type": "1", "metadata": {"window": "In hindsight, this should have been done much before;\nSSDlite internally resizes the images to 320x320 pixels, so it is only normal that it performs\npoorly on such small objects.\n 13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. 
The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n", "original_text": "Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. "}, "hash": "fcfa057187f566bca94b810b17ec7126e2c6524da0157fc696ee449337518d7f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e6d9ab09-6c64-4e95-a6a2-9868f680646d", "node_type": "1", "metadata": {"window": "The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n", "original_text": "Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. "}, "hash": "ffcca3348b8c1e732c8e27f9be9801495c2b15f812f51e05e672210c72f52880", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. 
", "mimetype": "text/plain", "start_char_idx": 29797, "end_char_idx": 29952, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e6d9ab09-6c64-4e95-a6a2-9868f680646d": {"__data__": {"id_": "e6d9ab09-6c64-4e95-a6a2-9868f680646d", "embedding": null, "metadata": {"window": "The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n", "original_text": "Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "96bd5939-4069-4e77-ab9c-3be9bb5f5b62", "node_type": "1", "metadata": {"window": "13\n\nFigure 2.7: Learning rate displayed for various training runs on the W&B dashboard. The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. 
", "original_text": "The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. "}, "hash": "ae7b814796f1d4934974a0aaa4fcc83c8fdfc52e811754f11d61db56f5053e58", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "379daacf-db91-4803-8797-738959d62c68", "node_type": "1", "metadata": {"window": "Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n", "original_text": "As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. "}, "hash": "6db7aaec15f7d2c987db2ba89d731e8c4a960e39ef8bfcc43a66104ea483f6e8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. ", "mimetype": "text/plain", "start_char_idx": 29952, "end_char_idx": 30153, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "379daacf-db91-4803-8797-738959d62c68": {"__data__": {"id_": "379daacf-db91-4803-8797-738959d62c68", "embedding": null, "metadata": {"window": "Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. 
This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n", "original_text": "As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e6d9ab09-6c64-4e95-a6a2-9868f680646d", "node_type": "1", "metadata": {"window": "The\nschedule is cosine annealing, with varying numbers of warm-up epochs.\n Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n", "original_text": "Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. "}, "hash": "ffcca3348b8c1e732c8e27f9be9801495c2b15f812f51e05e672210c72f52880", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "020544a1-99cb-4974-aa81-6c77428d9a61", "node_type": "1", "metadata": {"window": "Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. 
This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. ", "original_text": "This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n"}, "hash": "55cd8c6d1e152e4830d122fa7829e32eb7a9045e95e578ed9105d059dffd0af5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. ", "mimetype": "text/plain", "start_char_idx": 30153, "end_char_idx": 30318, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "020544a1-99cb-4974-aa81-6c77428d9a61": {"__data__": {"id_": "020544a1-99cb-4974-aa81-6c77428d9a61", "embedding": null, "metadata": {"window": "Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. 
", "original_text": "This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "379daacf-db91-4803-8797-738959d62c68", "node_type": "1", "metadata": {"window": "Figure 2.8: Bounding box size distribution for the natural and synthetic datasets.\n Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n", "original_text": "As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. "}, "hash": "6db7aaec15f7d2c987db2ba89d731e8c4a960e39ef8bfcc43a66104ea483f6e8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2cbe8c40-fe58-4443-87fe-4153b1f157bf", "node_type": "1", "metadata": {"window": "The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. 
Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. ", "original_text": "In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. "}, "hash": "f91f518e4dbd38f938e176015901ce1eb587c92e78702bd8afa2748a2d4809b1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n", "mimetype": "text/plain", "start_char_idx": 30318, "end_char_idx": 30437, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2cbe8c40-fe58-4443-87fe-4153b1f157bf": {"__data__": {"id_": "2cbe8c40-fe58-4443-87fe-4153b1f157bf", "embedding": null, "metadata": {"window": "The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. ", "original_text": "In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "020544a1-99cb-4974-aa81-6c77428d9a61", "node_type": "1", "metadata": {"window": "Faster R-CNN and Retina-Net\nTo test the newly created synthetic dataset, the same architecture of the model to beat was\nemployed. The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. 
This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. ", "original_text": "This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n"}, "hash": "55cd8c6d1e152e4830d122fa7829e32eb7a9045e95e578ed9105d059dffd0af5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1714cc7c-31df-49c3-ab78-61ae75f057d6", "node_type": "1", "metadata": {"window": "Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n", "original_text": "While the performance goals\nhad been achieved, the model was as slow as ever.\n"}, "hash": "6b1b90820c81a0026a4ba679f795ad09d32dbc722905e18ef2408eb08715c91c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. ", "mimetype": "text/plain", "start_char_idx": 30437, "end_char_idx": 30600, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1714cc7c-31df-49c3-ab78-61ae75f057d6": {"__data__": {"id_": "1714cc7c-31df-49c3-ab78-61ae75f057d6", "embedding": null, "metadata": {"window": "Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. 
This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n", "original_text": "While the performance goals\nhad been achieved, the model was as slow as ever.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2cbe8c40-fe58-4443-87fe-4153b1f157bf", "node_type": "1", "metadata": {"window": "The new Faster R-CNN with ResNet-50-FPN did great, both in terms of mAP on\nthe (synthetic) evaluation set, and, upon visual inspection, on natural images. Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. ", "original_text": "In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. "}, "hash": "f91f518e4dbd38f938e176015901ce1eb587c92e78702bd8afa2748a2d4809b1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7d9c121a-4000-4387-960f-62caea151cda", "node_type": "1", "metadata": {"window": "As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. 
While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. ", "original_text": "Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n"}, "hash": "d00dd1338749371f0ee91d01d4fe91a832de33aca644ea84be2e0e22261f9422", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While the performance goals\nhad been achieved, the model was as slow as ever.\n", "mimetype": "text/plain", "start_char_idx": 30600, "end_char_idx": 30678, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7d9c121a-4000-4387-960f-62caea151cda": {"__data__": {"id_": "7d9c121a-4000-4387-960f-62caea151cda", "embedding": null, "metadata": {"window": "As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. ", "original_text": "Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1714cc7c-31df-49c3-ab78-61ae75f057d6", "node_type": "1", "metadata": {"window": "Mimicking the\ntraining recipe indicated in torchvision, pre-trained weights for the backbone and a multi-step\nlearning rate schedule were adopted, with no augmentations other than the horizontal flip. 
As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n", "original_text": "While the performance goals\nhad been achieved, the model was as slow as ever.\n"}, "hash": "6b1b90820c81a0026a4ba679f795ad09d32dbc722905e18ef2408eb08715c91c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "789229fb-a1cd-484c-a3b2-46341da8ac31", "node_type": "1", "metadata": {"window": "This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n", "original_text": "There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. "}, "hash": "d1109204e3645433ab1434115450359a28ed382d81940a50fdc6263165b1a0b2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n", "mimetype": "text/plain", "start_char_idx": 30678, "end_char_idx": 30858, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "789229fb-a1cd-484c-a3b2-46341da8ac31": {"__data__": {"id_": "789229fb-a1cd-484c-a3b2-46341da8ac31", "embedding": null, "metadata": {"window": "This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. 
While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n", "original_text": "There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7d9c121a-4000-4387-960f-62caea151cda", "node_type": "1", "metadata": {"window": "As\nthe distribution of bounding box sizes displayed in Fig.2.8 is relatively narrow, the number\nof anchors \u2014a parameter that is not readily accessible\u2014 was reduced. This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. ", "original_text": "Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n"}, "hash": "d00dd1338749371f0ee91d01d4fe91a832de33aca644ea84be2e0e22261f9422", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3741d8c8-eda7-4c82-b775-dc9b223061d5", "node_type": "1", "metadata": {"window": "In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. 
Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. ", "original_text": "Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. "}, "hash": "c9800b2ff8d3c3734e65d7ea780d9e8276ff616d6e5e14d17d7b4ff421f724f2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. ", "mimetype": "text/plain", "start_char_idx": 30858, "end_char_idx": 30960, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3741d8c8-eda7-4c82-b775-dc9b223061d5": {"__data__": {"id_": "3741d8c8-eda7-4c82-b775-dc9b223061d5", "embedding": null, "metadata": {"window": "In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. ", "original_text": "Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "789229fb-a1cd-484c-a3b2-46341da8ac31", "node_type": "1", "metadata": {"window": "This was expected to\nimprove the model speed without affecting its accuracy, but in fact it affected neither of those.\n In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. 
While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n", "original_text": "There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. "}, "hash": "d1109204e3645433ab1434115450359a28ed382d81940a50fdc6263165b1a0b2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ed070ac8-242c-436d-979f-e5dfb6a0a8c9", "node_type": "1", "metadata": {"window": "While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. ", "original_text": "The accuracy was measured on the synthetic evaluation set.\n"}, "hash": "311938c745a21f147f6567965771997fe91befa612afda3c8936c47e56de58a8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. ", "mimetype": "text/plain", "start_char_idx": 30960, "end_char_idx": 31068, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ed070ac8-242c-436d-979f-e5dfb6a0a8c9": {"__data__": {"id_": "ed070ac8-242c-436d-979f-e5dfb6a0a8c9", "embedding": null, "metadata": {"window": "While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. 
Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. ", "original_text": "The accuracy was measured on the synthetic evaluation set.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3741d8c8-eda7-4c82-b775-dc9b223061d5", "node_type": "1", "metadata": {"window": "In hindsight, perhaps the reason is that, with such high resolution images (4096x2160 pixels),\n14\n\nmost of the inference time is taken to compute the feature map. While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. ", "original_text": "Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. "}, "hash": "c9800b2ff8d3c3734e65d7ea780d9e8276ff616d6e5e14d17d7b4ff421f724f2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "04b13447-624c-4234-8f00-ddc6e36c6261", "node_type": "1", "metadata": {"window": "Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. ", "original_text": "However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. 
"}, "hash": "eb24fe057ee1d6b8fc6e992577fb70eff6caf4c31c2fa62e3860996ad0d7d97e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The accuracy was measured on the synthetic evaluation set.\n", "mimetype": "text/plain", "start_char_idx": 31068, "end_char_idx": 31127, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "04b13447-624c-4234-8f00-ddc6e36c6261": {"__data__": {"id_": "04b13447-624c-4234-8f00-ddc6e36c6261", "embedding": null, "metadata": {"window": "Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. ", "original_text": "However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ed070ac8-242c-436d-979f-e5dfb6a0a8c9", "node_type": "1", "metadata": {"window": "While the performance goals\nhad been achieved, the model was as slow as ever.\n Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. 
", "original_text": "The accuracy was measured on the synthetic evaluation set.\n"}, "hash": "311938c745a21f147f6567965771997fe91befa612afda3c8936c47e56de58a8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8c368101-cf58-43d0-b0f5-405073a07265", "node_type": "1", "metadata": {"window": "There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n", "original_text": "Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n"}, "hash": "8b6ea312a6c51a74270a5728ccc379c91b4b0e12f5f75e51f5046140f62e5180", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. ", "mimetype": "text/plain", "start_char_idx": 31127, "end_char_idx": 31274, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8c368101-cf58-43d0-b0f5-405073a07265": {"__data__": {"id_": "8c368101-cf58-43d0-b0f5-405073a07265", "embedding": null, "metadata": {"window": "There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. 
An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n", "original_text": "Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "04b13447-624c-4234-8f00-ddc6e36c6261", "node_type": "1", "metadata": {"window": "Experimentation with RetinaNet-v2 [28], a much more modern architecture compared to Faster\nR-CNN, was disappointing as there were no improvements neither in speed nor in accuracy.\n There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. ", "original_text": "However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. "}, "hash": "eb24fe057ee1d6b8fc6e992577fb70eff6caf4c31c2fa62e3860996ad0d7d97e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5b3eb67a-149f-437c-9947-f25de38686ad", "node_type": "1", "metadata": {"window": "Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. ", "original_text": "Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. 
"}, "hash": "8d8975b96147c1ebf2611f3f3e6d3658d2746da80b69b1f7527ff5e7bd21fdec", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n", "mimetype": "text/plain", "start_char_idx": 31274, "end_char_idx": 31383, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5b3eb67a-149f-437c-9947-f25de38686ad": {"__data__": {"id_": "5b3eb67a-149f-437c-9947-f25de38686ad", "embedding": null, "metadata": {"window": "Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. ", "original_text": "Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8c368101-cf58-43d0-b0f5-405073a07265", "node_type": "1", "metadata": {"window": "There was good reason to believe that the single-stage model would be faster, but it didn\u2019t\nprove so. Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. 
An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n", "original_text": "Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n"}, "hash": "8b6ea312a6c51a74270a5728ccc379c91b4b0e12f5f75e51f5046140f62e5180", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e55d245d-d07f-4a06-923b-df6c86366071", "node_type": "1", "metadata": {"window": "The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n", "original_text": "On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. "}, "hash": "59b68497c7c2201fe2a093a53b7f1fb6bf76fadfaf0a13195bc33a3def55966d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. ", "mimetype": "text/plain", "start_char_idx": 31383, "end_char_idx": 31479, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e55d245d-d07f-4a06-923b-df6c86366071": {"__data__": {"id_": "e55d245d-d07f-4a06-923b-df6c86366071", "embedding": null, "metadata": {"window": "The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n", "original_text": "On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5b3eb67a-149f-437c-9947-f25de38686ad", "node_type": "1", "metadata": {"window": "Again, this could be due to the high resolution of the images, which make the\nbackbone the main bottleneck. The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. ", "original_text": "Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. "}, "hash": "8d8975b96147c1ebf2611f3f3e6d3658d2746da80b69b1f7527ff5e7bd21fdec", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6d50a217-465b-45ec-b02d-f640e3b65384", "node_type": "1", "metadata": {"window": "However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). ", "original_text": "Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. "}, "hash": "4aa7242cd1a2b1c83de1eca37f1f4a9ddf85ba11aaab6fd81b12ead28febbd10", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. 
", "mimetype": "text/plain", "start_char_idx": 31479, "end_char_idx": 31558, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6d50a217-465b-45ec-b02d-f640e3b65384": {"__data__": {"id_": "6d50a217-465b-45ec-b02d-f640e3b65384", "embedding": null, "metadata": {"window": "However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). ", "original_text": "Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e55d245d-d07f-4a06-923b-df6c86366071", "node_type": "1", "metadata": {"window": "The accuracy was measured on the synthetic evaluation set.\n However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n", "original_text": "On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. 
"}, "hash": "59b68497c7c2201fe2a093a53b7f1fb6bf76fadfaf0a13195bc33a3def55966d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f6312498-1b40-483c-94e8-69ab9cfe89e3", "node_type": "1", "metadata": {"window": "Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. ", "original_text": "An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n"}, "hash": "b2d0750ef3a93072209e259eb89025da6181f785277380ec0a79d116626e61a2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. ", "mimetype": "text/plain", "start_char_idx": 31558, "end_char_idx": 31773, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f6312498-1b40-483c-94e8-69ab9cfe89e3": {"__data__": {"id_": "f6312498-1b40-483c-94e8-69ab9cfe89e3", "embedding": null, "metadata": {"window": "Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. 
", "original_text": "An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6d50a217-465b-45ec-b02d-f640e3b65384", "node_type": "1", "metadata": {"window": "However, the assumption that small differences in accuracy on synthetic images proportion-\nally translate to natural images is yet to be verified. Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). ", "original_text": "Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. "}, "hash": "4aa7242cd1a2b1c83de1eca37f1f4a9ddf85ba11aaab6fd81b12ead28febbd10", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e8aae986-aad5-4687-8947-c0651f775ade", "node_type": "1", "metadata": {"window": "Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n", "original_text": "2.4. 
"}, "hash": "f3d4bef01e5f5d0fe678aee8be7514350ccb13283fb56694be0d5cf11dd35eb2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n", "mimetype": "text/plain", "start_char_idx": 31773, "end_char_idx": 31947, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e8aae986-aad5-4687-8947-c0651f775ade": {"__data__": {"id_": "e8aae986-aad5-4687-8947-c0651f775ade", "embedding": null, "metadata": {"window": "Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n", "original_text": "2.4. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f6312498-1b40-483c-94e8-69ab9cfe89e3", "node_type": "1", "metadata": {"window": "Further efforts to improve the accuracy of\nthe model should be delayed until proper test sets are available.\n Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. 
", "original_text": "An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n"}, "hash": "b2d0750ef3a93072209e259eb89025da6181f785277380ec0a79d116626e61a2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c192aa80-849b-451e-8c65-b717f84ec28c", "node_type": "1", "metadata": {"window": "On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. ", "original_text": "R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n"}, "hash": "773d846e3f2a5b1610a32e60873bbfce4a2b809b046ba95606cb40034b5f8f52", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "2.4. ", "mimetype": "text/plain", "start_char_idx": 31947, "end_char_idx": 31952, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c192aa80-849b-451e-8c65-b717f84ec28c": {"__data__": {"id_": "c192aa80-849b-451e-8c65-b717f84ec28c", "embedding": null, "metadata": {"window": "On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. 
", "original_text": "R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e8aae986-aad5-4687-8947-c0651f775ade", "node_type": "1", "metadata": {"window": "Finally, the Faster R-CNN with MobileNetV3-Large-FPN backbone was tested, with great suc-\ncess. On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n", "original_text": "2.4. "}, "hash": "f3d4bef01e5f5d0fe678aee8be7514350ccb13283fb56694be0d5cf11dd35eb2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "58be4bf7-c6b8-4d6b-a0f5-3e0551cd1c49", "node_type": "1", "metadata": {"window": "Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. ", "original_text": "While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). 
"}, "hash": "4265fa846338def076c0ba8f29a52ec55076ec42c3d94681dab626e4b33560d1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n", "mimetype": "text/plain", "start_char_idx": 31952, "end_char_idx": 32057, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "58be4bf7-c6b8-4d6b-a0f5-3e0551cd1c49": {"__data__": {"id_": "58be4bf7-c6b8-4d6b-a0f5-3e0551cd1c49", "embedding": null, "metadata": {"window": "Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. ", "original_text": "While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c192aa80-849b-451e-8c65-b717f84ec28c", "node_type": "1", "metadata": {"window": "On CPU, the model was 6 times fasterthan its equivalent with heavier backbone. Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. 
To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. ", "original_text": "R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n"}, "hash": "773d846e3f2a5b1610a32e60873bbfce4a2b809b046ba95606cb40034b5f8f52", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "eb14c19c-3e70-47b0-a6c1-af75e5370765", "node_type": "1", "metadata": {"window": "An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n", "original_text": "In the following pages, a visual\ncomparison between the performance of the old and new models is presented. "}, "hash": "12181085e277c9f7b4698f873c81b9137dcf3765bde2d48ea0414c30d27af126", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). ", "mimetype": "text/plain", "start_char_idx": 32057, "end_char_idx": 32294, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "eb14c19c-3e70-47b0-a6c1-af75e5370765": {"__data__": {"id_": "eb14c19c-3e70-47b0-a6c1-af75e5370765", "embedding": null, "metadata": {"window": "An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. 
When only two images are presented, the second image counts for both new models\n(predictions were identical).\n", "original_text": "In the following pages, a visual\ncomparison between the performance of the old and new models is presented. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "58be4bf7-c6b8-4d6b-a0f5-3e0551cd1c49", "node_type": "1", "metadata": {"window": "Again,\nusing the performance on synthetic data as a proxy for the performance on natural data, hyper-\nparameters such as the number of trainable backbone layers and the learning rate were tweaked\nto optimal values. An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. ", "original_text": "While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). "}, "hash": "4265fa846338def076c0ba8f29a52ec55076ec42c3d94681dab626e4b33560d1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2b596c03-cdbe-4d95-bb27-acb23a20f45f", "node_type": "1", "metadata": {"window": "2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. 
When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. ", "original_text": "To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n"}, "hash": "2854355e88fc96c5c8b72436ff75dd701f0fd97a5193a0e34679e4fc3989ec3d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In the following pages, a visual\ncomparison between the performance of the old and new models is presented. ", "mimetype": "text/plain", "start_char_idx": 32294, "end_char_idx": 32402, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2b596c03-cdbe-4d95-bb27-acb23a20f45f": {"__data__": {"id_": "2b596c03-cdbe-4d95-bb27-acb23a20f45f", "embedding": null, "metadata": {"window": "2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. ", "original_text": "To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "eb14c19c-3e70-47b0-a6c1-af75e5370765", "node_type": "1", "metadata": {"window": "An attempt to match this model accuracy with a custom RetinaNet-v2 with\nMobileNetV3-Large-FPN backbone \u2014a pairing that is not available off-the-shelf\u2014 proved un-\nsuccessful.\n 2.4. 
R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n", "original_text": "In the following pages, a visual\ncomparison between the performance of the old and new models is presented. "}, "hash": "12181085e277c9f7b4698f873c81b9137dcf3765bde2d48ea0414c30d27af126", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "066934b3-376f-4c44-963d-582da2c082ee", "node_type": "1", "metadata": {"window": "R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. ", "original_text": "A constant threshold was kept for each model. 
"}, "hash": "60e45ab5d1ffb1bbdb1a3e4342d16edd94bf52a5e32dbaedb8a034e1d2650aa7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n", "mimetype": "text/plain", "start_char_idx": 32402, "end_char_idx": 32511, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "066934b3-376f-4c44-963d-582da2c082ee": {"__data__": {"id_": "066934b3-376f-4c44-963d-582da2c082ee", "embedding": null, "metadata": {"window": "R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. ", "original_text": "A constant threshold was kept for each model. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2b596c03-cdbe-4d95-bb27-acb23a20f45f", "node_type": "1", "metadata": {"window": "2.4. R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. 
When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. ", "original_text": "To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n"}, "hash": "2854355e88fc96c5c8b72436ff75dd701f0fd97a5193a0e34679e4fc3989ec3d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fc4eb05a-8cb8-4116-8635-524e9867c4cc", "node_type": "1", "metadata": {"window": "While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. ", "original_text": "Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. "}, "hash": "fbcc86a7247413953493145039158a50072717ac2ee05462e107650a6b02f55d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A constant threshold was kept for each model. 
", "mimetype": "text/plain", "start_char_idx": 32511, "end_char_idx": 32557, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fc4eb05a-8cb8-4116-8635-524e9867c4cc": {"__data__": {"id_": "fc4eb05a-8cb8-4116-8635-524e9867c4cc", "embedding": null, "metadata": {"window": "While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. ", "original_text": "Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "066934b3-376f-4c44-963d-582da2c082ee", "node_type": "1", "metadata": {"window": "R ESULTS\nThe goals of this work were to improve the object detector both in terms of speed and accuracy.\n While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. 
Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. ", "original_text": "A constant threshold was kept for each model. "}, "hash": "60e45ab5d1ffb1bbdb1a3e4342d16edd94bf52a5e32dbaedb8a034e1d2650aa7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ebfbbaaf-6c29-4d73-9dbd-38dc04cf6cda", "node_type": "1", "metadata": {"window": "In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n", "original_text": "When only two images are presented, the second image counts for both new models\n(predictions were identical).\n"}, "hash": "f718c0829d5d15ec20d7dc243083e4c40404306e8a4d8f892e7f47925cbdeb44", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. 
", "mimetype": "text/plain", "start_char_idx": 32557, "end_char_idx": 32703, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ebfbbaaf-6c29-4d73-9dbd-38dc04cf6cda": {"__data__": {"id_": "ebfbbaaf-6c29-4d73-9dbd-38dc04cf6cda", "embedding": null, "metadata": {"window": "In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n", "original_text": "When only two images are presented, the second image counts for both new models\n(predictions were identical).\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fc4eb05a-8cb8-4116-8635-524e9867c4cc", "node_type": "1", "metadata": {"window": "While also creating a good basis for further development, these goals were achieved with the\ncreation (and deployment) of two Faster R-CNN models: a slow model (with ResNet-50-FPN\nbackbone) and a fast model (with MobileNetV3-Large-FPN). In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. 
When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. ", "original_text": "Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. "}, "hash": "fbcc86a7247413953493145039158a50072717ac2ee05462e107650a6b02f55d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "57e1fc5b-0ed8-4eff-ab60-7dd03efb5f08", "node_type": "1", "metadata": {"window": "To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. 
Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n", "original_text": "Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. "}, "hash": "08a7ed8df05f285019a22ed2b4408fc81fb6bf668ab4929be0c6a9dda1e93147", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "When only two images are presented, the second image counts for both new models\n(predictions were identical).\n", "mimetype": "text/plain", "start_char_idx": 32703, "end_char_idx": 32813, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "57e1fc5b-0ed8-4eff-ab60-7dd03efb5f08": {"__data__": {"id_": "57e1fc5b-0ed8-4eff-ab60-7dd03efb5f08", "embedding": null, "metadata": {"window": "To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n", "original_text": "Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ebfbbaaf-6c29-4d73-9dbd-38dc04cf6cda", "node_type": "1", "metadata": {"window": "In the following pages, a visual\ncomparison between the performance of the old and new models is presented. To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n", "original_text": "When only two images are presented, the second image counts for both new models\n(predictions were identical).\n"}, "hash": "f718c0829d5d15ec20d7dc243083e4c40404306e8a4d8f892e7f47925cbdeb44", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "745fea69-17e1-4cf3-94b3-a2ca2cc93d52", "node_type": "1", "metadata": {"window": "A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. 
The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n", "original_text": "The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. "}, "hash": "2556d7d6e0ae724259ffb7d7e29db378540c29c81155ea76af44154a000ba711", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. ", "mimetype": "text/plain", "start_char_idx": 32813, "end_char_idx": 33145, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "745fea69-17e1-4cf3-94b3-a2ca2cc93d52": {"__data__": {"id_": "745fea69-17e1-4cf3-94b3-a2ca2cc93d52", "embedding": null, "metadata": {"window": "A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. 
moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n", "original_text": "The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "57e1fc5b-0ed8-4eff-ab60-7dd03efb5f08", "node_type": "1", "metadata": {"window": "To ensure fair-\nness, a representative subsample of the differences viewed during image inspection is shown.\n A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. 
Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n", "original_text": "Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. "}, "hash": "08a7ed8df05f285019a22ed2b4408fc81fb6bf668ab4929be0c6a9dda1e93147", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ed2262d0-d291-4519-81c0-fbbdd34decc8", "node_type": "1", "metadata": {"window": "Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. ", "original_text": "moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. 
"}, "hash": "de8b7f860874cd1736573e9bacc5efdf917b8288472e973a2c710f932eed471e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. ", "mimetype": "text/plain", "start_char_idx": 33145, "end_char_idx": 33521, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ed2262d0-d291-4519-81c0-fbbdd34decc8": {"__data__": {"id_": "ed2262d0-d291-4519-81c0-fbbdd34decc8", "embedding": null, "metadata": {"window": "Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. ", "original_text": "moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "745fea69-17e1-4cf3-94b3-a2ca2cc93d52", "node_type": "1", "metadata": {"window": "A constant threshold was kept for each model. Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n", "original_text": "The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. "}, "hash": "2556d7d6e0ae724259ffb7d7e29db378540c29c81155ea76af44154a000ba711", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "16149cf3-726a-481c-b7ae-7b8e3b9a06d3", "node_type": "1", "metadata": {"window": "When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. 
The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. ", "original_text": "Users can select the desired model based on their time constraints.\n"}, "hash": "0e223a115fa2b2d4b5472ecb57ab8bbef8584578a09085b8a5619fc834b685de", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. ", "mimetype": "text/plain", "start_char_idx": 33521, "end_char_idx": 33937, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "16149cf3-726a-481c-b7ae-7b8e3b9a06d3": {"__data__": {"id_": "16149cf3-726a-481c-b7ae-7b8e3b9a06d3", "embedding": null, "metadata": {"window": "When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. 
moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. ", "original_text": "Users can select the desired model based on their time constraints.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ed2262d0-d291-4519-81c0-fbbdd34decc8", "node_type": "1", "metadata": {"window": "Each figure displays three images: the first\ncorresponds to the old model, the second to the new slow model, and the third to the new fast\nmodel. When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. 
", "original_text": "moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. "}, "hash": "de8b7f860874cd1736573e9bacc5efdf917b8288472e973a2c710f932eed471e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c4bdd6cc-2e56-482c-bd50-78786efc968d", "node_type": "1", "metadata": {"window": "Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. 
", "original_text": "The code for the object detection module is available on the ami-ml public GitHub repository.\n"}, "hash": "391819ccdc84bfb8c3729479b149e001fc35bd7e676708875c3149482085a881", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Users can select the desired model based on their time constraints.\n", "mimetype": "text/plain", "start_char_idx": 33937, "end_char_idx": 34005, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c4bdd6cc-2e56-482c-bd50-78786efc968d": {"__data__": {"id_": "c4bdd6cc-2e56-482c-bd50-78786efc968d", "embedding": null, "metadata": {"window": "Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. 
", "original_text": "The code for the object detection module is available on the ami-ml public GitHub repository.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "16149cf3-726a-481c-b7ae-7b8e3b9a06d3", "node_type": "1", "metadata": {"window": "When only two images are presented, the second image counts for both new models\n(predictions were identical).\n Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. ", "original_text": "Users can select the desired model based on their time constraints.\n"}, "hash": "0e223a115fa2b2d4b5472ecb57ab8bbef8584578a09085b8a5619fc834b685de", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ce895fd5-493c-4fc9-8cb3-44afeb12d472", "node_type": "1", "metadata": {"window": "The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. 
moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n", "original_text": "Hopefully, the code for the other modules will soon be merged on the repo as well.\n"}, "hash": "f457c85c520f2626c78126e285157d9629d6c655121da9f464ae87ebfc3ea40e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The code for the object detection module is available on the ami-ml public GitHub repository.\n", "mimetype": "text/plain", "start_char_idx": 34005, "end_char_idx": 34099, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ce895fd5-493c-4fc9-8cb3-44afeb12d472": {"__data__": {"id_": "ce895fd5-493c-4fc9-8cb3-44afeb12d472", "embedding": null, "metadata": {"window": "The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. 
As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n", "original_text": "Hopefully, the code for the other modules will soon be merged on the repo as well.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c4bdd6cc-2e56-482c-bd50-78786efc968d", "node_type": "1", "metadata": {"window": "Figure 2.9\n15\n\nFigure 2.10\nFigure 2.11\nFigure 2.12\nFigure 2.13\n16\n\nFigure 2.14\nFigure 2.15\nFigure 2.16\nFigure 2.17\n17\n\nFigure 2.18\nFrom visual inspection of the models\u2019 predictions on natural images, it seems safe to say that\nboth are better than the previous model in terms of accuracy, largely thanks to the new training\ndataset. The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. 
", "original_text": "The code for the object detection module is available on the ami-ml public GitHub repository.\n"}, "hash": "391819ccdc84bfb8c3729479b149e001fc35bd7e676708875c3149482085a881", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cc8d696d-c1d6-4de3-b73d-d86d9caedd88", "node_type": "1", "metadata": {"window": "moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. ", "original_text": "18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. "}, "hash": "3491bf595a940c0f7272e310694405c4ae72c426aa4771445a16b19a24dc9ab0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Hopefully, the code for the other modules will soon be merged on the repo as well.\n", "mimetype": "text/plain", "start_char_idx": 34099, "end_char_idx": 34182, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cc8d696d-c1d6-4de3-b73d-d86d9caedd88": {"__data__": {"id_": "cc8d696d-c1d6-4de3-b73d-d86d9caedd88", "embedding": null, "metadata": {"window": "moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. 
As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. ", "original_text": "18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ce895fd5-493c-4fc9-8cb3-44afeb12d472", "node_type": "1", "metadata": {"window": "The following observations can be made about the new models:\n(i) they are better at detecting small moths (Fig.2.13, 2.14, 2.15, 2.16, 2.17, 2.18);\n(ii) they are better at separating overlapping moths, although not perfect (Fig.2.9, 2.12);\n(iii) they don\u2019t get fooled by artifacts on the screen (Fig.2.11, 2.15, 2.16, 2.18;\n(iv) they seem more resistant to noisy images (e.g. moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. 
Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n", "original_text": "Hopefully, the code for the other modules will soon be merged on the repo as well.\n"}, "hash": "f457c85c520f2626c78126e285157d9629d6c655121da9f464ae87ebfc3ea40e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bcbd7cde-e8ee-4d53-abb9-c1d605e01cc0", "node_type": "1", "metadata": {"window": "Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. ", "original_text": "As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. "}, "hash": "a3233ddc7fd6096926749aaeba36601856be0fa48db6a5ca2ed784f4832da4cf", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. ", "mimetype": "text/plain", "start_char_idx": 34182, "end_char_idx": 34300, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bcbd7cde-e8ee-4d53-abb9-c1d605e01cc0": {"__data__": {"id_": "bcbd7cde-e8ee-4d53-abb9-c1d605e01cc0", "embedding": null, "metadata": {"window": "Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. 
Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. ", "original_text": "As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cc8d696d-c1d6-4de3-b73d-d86d9caedd88", "node_type": "1", "metadata": {"window": "moths flying close to the camera, Fig.2.10,\n2.14);\n(v) by design, they tend not to detect mosquitos and flies, as those were present on the back-\nground images used for the synthetic dataset, and were filtered out of the segmented\ncrops (Fig.2.18);\n(vi) by design, they predict tight bounding boxes;\n(vii) the slow model seems to have an edge over the fast mode (Fig.2.9 and 2.11, which is why\nit was also released. Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. ", "original_text": "18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. "}, "hash": "3491bf595a940c0f7272e310694405c4ae72c426aa4771445a16b19a24dc9ab0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "98725ca6-45fd-40b2-a90b-3b0b031018bb", "node_type": "1", "metadata": {"window": "The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. 
For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). ", "original_text": "For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. "}, "hash": "93b8b0b0879f5a599f4852774e9aecf3b72bccc775cd692e69d86b1e08c16508", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. ", "mimetype": "text/plain", "start_char_idx": 34300, "end_char_idx": 34564, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "98725ca6-45fd-40b2-a90b-3b0b031018bb": {"__data__": {"id_": "98725ca6-45fd-40b2-a90b-3b0b031018bb", "embedding": null, "metadata": {"window": "The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). ", "original_text": "For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bcbd7cde-e8ee-4d53-abb9-c1d605e01cc0", "node_type": "1", "metadata": {"window": "Users can select the desired model based on their time constraints.\n The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. ", "original_text": "As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. "}, "hash": "a3233ddc7fd6096926749aaeba36601856be0fa48db6a5ca2ed784f4832da4cf", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "262df308-d299-464d-9709-654347f75a42", "node_type": "1", "metadata": {"window": "Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. 
", "original_text": "Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n"}, "hash": "c4977a8c0a897634cc27a38e5e08db7f6ca914023e6f969e7d7c0f1b4472058a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. ", "mimetype": "text/plain", "start_char_idx": 34564, "end_char_idx": 34811, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "262df308-d299-464d-9709-654347f75a42": {"__data__": {"id_": "262df308-d299-464d-9709-654347f75a42", "embedding": null, "metadata": {"window": "Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. ", "original_text": "Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "98725ca6-45fd-40b2-a90b-3b0b031018bb", "node_type": "1", "metadata": {"window": "The code for the object detection module is available on the ami-ml public GitHub repository.\n Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. 
Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). ", "original_text": "For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. "}, "hash": "93b8b0b0879f5a599f4852774e9aecf3b72bccc775cd692e69d86b1e08c16508", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "aa8a5198-b3e1-4f5c-8b73-378e209cc5ba", "node_type": "1", "metadata": {"window": "18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. ", "original_text": "Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. "}, "hash": "c1bb27db81daf0d216d0989596ba4ccbc76a1083adca04b9a452af3d14babc55", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n", "mimetype": "text/plain", "start_char_idx": 34811, "end_char_idx": 34914, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "aa8a5198-b3e1-4f5c-8b73-378e209cc5ba": {"__data__": {"id_": "aa8a5198-b3e1-4f5c-8b73-378e209cc5ba", "embedding": null, "metadata": {"window": "18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. 
For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. ", "original_text": "Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "262df308-d299-464d-9709-654347f75a42", "node_type": "1", "metadata": {"window": "Hopefully, the code for the other modules will soon be merged on the repo as well.\n 18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. ", "original_text": "Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n"}, "hash": "c4977a8c0a897634cc27a38e5e08db7f6ca914023e6f969e7d7c0f1b4472058a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f60b9a44-dd5e-4f45-841f-a61ed2ea1403", "node_type": "1", "metadata": {"window": "As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. 
Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). ", "original_text": "In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. "}, "hash": "774cd969ff50313b82997d64f9b2e33ecb941069204314328ff8551b83c38ef9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. ", "mimetype": "text/plain", "start_char_idx": 34914, "end_char_idx": 35040, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f60b9a44-dd5e-4f45-841f-a61ed2ea1403": {"__data__": {"id_": "f60b9a44-dd5e-4f45-841f-a61ed2ea1403", "embedding": null, "metadata": {"window": "As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). ", "original_text": "In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "aa8a5198-b3e1-4f5c-8b73-378e209cc5ba", "node_type": "1", "metadata": {"window": "18\n\nChapter 3\nActive Learning\nThe final step of the pipeline presented in Fig.1.2 is the moth species classification. As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. ", "original_text": "Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. "}, "hash": "c1bb27db81daf0d216d0989596ba4ccbc76a1083adca04b9a452af3d14babc55", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bd3504db-ef98-4219-8390-ad39c8c56d9d", "node_type": "1", "metadata": {"window": "For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. ", "original_text": "Additionally, these images are low resolution (around\n120 pixels per side, on average). 
"}, "hash": "6d065ce56a2e6c1f4687bc94434be19964684c7c0a9d802c674752e3bdbcbe49", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. ", "mimetype": "text/plain", "start_char_idx": 35040, "end_char_idx": 35146, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bd3504db-ef98-4219-8390-ad39c8c56d9d": {"__data__": {"id_": "bd3504db-ef98-4219-8390-ad39c8c56d9d", "embedding": null, "metadata": {"window": "For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. ", "original_text": "Additionally, these images are low resolution (around\n120 pixels per side, on average). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f60b9a44-dd5e-4f45-841f-a61ed2ea1403", "node_type": "1", "metadata": {"window": "As men-\ntioned in the introduction, since there is no diverse and extensive labeled image dataset from\nmoth traps, and given that making one from scratch is not feasible with the current time and\nbudget constraints, the training dataset was constructed with GBIF. For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. 
The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). ", "original_text": "In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. "}, "hash": "774cd969ff50313b82997d64f9b2e33ecb941069204314328ff8551b83c38ef9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "75dba363-5554-4930-aef5-b2e7e72fc1ec", "node_type": "1", "metadata": {"window": "Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n", "original_text": "In contrast, GBIF images are mostly high resolution. "}, "hash": "b5bd4321785741417c3682eb4737f8a90763ce47125b510170f51afaf24c5b92", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Additionally, these images are low resolution (around\n120 pixels per side, on average). ", "mimetype": "text/plain", "start_char_idx": 35146, "end_char_idx": 35234, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "75dba363-5554-4930-aef5-b2e7e72fc1ec": {"__data__": {"id_": "75dba363-5554-4930-aef5-b2e7e72fc1ec", "embedding": null, "metadata": {"window": "Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. 
Even a relatively small number of these images (e.g.\n", "original_text": "In contrast, GBIF images are mostly high resolution. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bd3504db-ef98-4219-8390-ad39c8c56d9d", "node_type": "1", "metadata": {"window": "For a single region, there\nare usually multiple thousands relevant species of moths; GBIF has been fundamental to build\na classifier able to identify as many classes, making it possible to create training datasets of\nmany hundred thousand images. Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. ", "original_text": "Additionally, these images are low resolution (around\n120 pixels per side, on average). "}, "hash": "6d065ce56a2e6c1f4687bc94434be19964684c7c0a9d802c674752e3bdbcbe49", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4e2999a5-e19b-459a-829d-4feff5e69605", "node_type": "1", "metadata": {"window": "Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. ", "original_text": "The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. 
"}, "hash": "de390a2907d0cf262ce54afd40ad2fe9ccf66ff9cff2a66e84316af553e77a80", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In contrast, GBIF images are mostly high resolution. ", "mimetype": "text/plain", "start_char_idx": 35234, "end_char_idx": 35287, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4e2999a5-e19b-459a-829d-4feff5e69605": {"__data__": {"id_": "4e2999a5-e19b-459a-829d-4feff5e69605", "embedding": null, "metadata": {"window": "Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. ", "original_text": "The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "75dba363-5554-4930-aef5-b2e7e72fc1ec", "node_type": "1", "metadata": {"window": "Unfortunately, there is a significant data shift between train-\ning images and images from moth traps.\n Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n", "original_text": "In contrast, GBIF images are mostly high resolution. 
"}, "hash": "b5bd4321785741417c3682eb4737f8a90763ce47125b510170f51afaf24c5b92", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "46da3bf6-3a19-4591-b4e7-e58c2df3d70a", "node_type": "1", "metadata": {"window": "In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n", "original_text": "While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). "}, "hash": "4038a38bbe8f1b39611eeda1a469bf01a6738d380100ad1cbe6310d5a4249721", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. ", "mimetype": "text/plain", "start_char_idx": 35287, "end_char_idx": 35456, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "46da3bf6-3a19-4591-b4e7-e58c2df3d70a": {"__data__": {"id_": "46da3bf6-3a19-4591-b4e7-e58c2df3d70a", "embedding": null, "metadata": {"window": "In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. 
In this context, active learning appears as a very attractive\nfield of research.\n", "original_text": "While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4e2999a5-e19b-459a-829d-4feff5e69605", "node_type": "1", "metadata": {"window": "Typical images from GBIF and from moth traps (following object detection) are presented in\nFig.3.1 and Fig.3.2, respectively. In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. ", "original_text": "The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. "}, "hash": "de390a2907d0cf262ce54afd40ad2fe9ccf66ff9cff2a66e84316af553e77a80", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "be2059b1-c993-4075-ba87-0a9a4876b6a1", "node_type": "1", "metadata": {"window": "Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. 
In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. ", "original_text": "One way to tackle the problem would be to in-\ncorporate trap images to the training set. "}, "hash": "aa7f2c81be1f2ef3d1d499ab765ca6546cc14201d175c725870287954938c8f8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). ", "mimetype": "text/plain", "start_char_idx": 35456, "end_char_idx": 35711, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "be2059b1-c993-4075-ba87-0a9a4876b6a1": {"__data__": {"id_": "be2059b1-c993-4075-ba87-0a9a4876b6a1", "embedding": null, "metadata": {"window": "Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. ", "original_text": "One way to tackle the problem would be to in-\ncorporate trap images to the training set. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "46da3bf6-3a19-4591-b4e7-e58c2df3d70a", "node_type": "1", "metadata": {"window": "In the trap images, the background is uniform, the camera an-\ngle is fixed, and the moths fill the frame. Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. 
While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n", "original_text": "While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). "}, "hash": "4038a38bbe8f1b39611eeda1a469bf01a6738d380100ad1cbe6310d5a4249721", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e5aee2ea-a677-438a-92db-4f9006ffa7ad", "node_type": "1", "metadata": {"window": "In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. ", "original_text": "Even a relatively small number of these images (e.g.\n"}, "hash": "24660cd99fd9ad517094cd317c9d638c454bebf9fa62c85b4975a132c536ef1b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "One way to tackle the problem would be to in-\ncorporate trap images to the training set. ", "mimetype": "text/plain", "start_char_idx": 35711, "end_char_idx": 35800, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e5aee2ea-a677-438a-92db-4f9006ffa7ad": {"__data__": {"id_": "e5aee2ea-a677-438a-92db-4f9006ffa7ad", "embedding": null, "metadata": {"window": "In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. 
While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. ", "original_text": "Even a relatively small number of these images (e.g.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "be2059b1-c993-4075-ba87-0a9a4876b6a1", "node_type": "1", "metadata": {"window": "Additionally, these images are low resolution (around\n120 pixels per side, on average). In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. ", "original_text": "One way to tackle the problem would be to in-\ncorporate trap images to the training set. "}, "hash": "aa7f2c81be1f2ef3d1d499ab765ca6546cc14201d175c725870287954938c8f8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "643344ab-ed1a-4ddf-a835-06cf20584250", "node_type": "1", "metadata": {"window": "The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). 
One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. ", "original_text": "5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. "}, "hash": "7c985ebc96f4cd706ccf6ca66368a0703f121a5711c8c4a59d3f0dcfb8800525", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Even a relatively small number of these images (e.g.\n", "mimetype": "text/plain", "start_char_idx": 35800, "end_char_idx": 35853, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "643344ab-ed1a-4ddf-a835-06cf20584250": {"__data__": {"id_": "643344ab-ed1a-4ddf-a835-06cf20584250", "embedding": null, "metadata": {"window": "The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. ", "original_text": "5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e5aee2ea-a677-438a-92db-4f9006ffa7ad", "node_type": "1", "metadata": {"window": "In contrast, GBIF images are mostly high resolution. The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. ", "original_text": "Even a relatively small number of these images (e.g.\n"}, "hash": "24660cd99fd9ad517094cd317c9d638c454bebf9fa62c85b4975a132c536ef1b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a123fbb6-9e49-4f80-8ce3-8531619a2e99", "node_type": "1", "metadata": {"window": "While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. 
", "original_text": "In this context, active learning appears as a very attractive\nfield of research.\n"}, "hash": "01495c9c8c894ac06e53f818764074e9ae8579869981fe89100da5180b16c9a0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. ", "mimetype": "text/plain", "start_char_idx": 35853, "end_char_idx": 36074, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a123fbb6-9e49-4f80-8ce3-8531619a2e99": {"__data__": {"id_": "a123fbb6-9e49-4f80-8ce3-8531619a2e99", "embedding": null, "metadata": {"window": "While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. ", "original_text": "In this context, active learning appears as a very attractive\nfield of research.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "643344ab-ed1a-4ddf-a835-06cf20584250", "node_type": "1", "metadata": {"window": "The\nbackground is heterogeneous, with moths sometimes blending into it; moths are pictured from\nvarying perspectives and often only occupy a small portion of the image. While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. 
In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. ", "original_text": "5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. "}, "hash": "7c985ebc96f4cd706ccf6ca66368a0703f121a5711c8c4a59d3f0dcfb8800525", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "79117bf7-9333-4a0b-8199-07ee218c7e87", "node_type": "1", "metadata": {"window": "One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. ", "original_text": "Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. "}, "hash": "2b0df8df8fd9b1d3ef02091e64a50722b9a9b64b1fafe63a6488a54c3b8df2eb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In this context, active learning appears as a very attractive\nfield of research.\n", "mimetype": "text/plain", "start_char_idx": 36074, "end_char_idx": 36155, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "79117bf7-9333-4a0b-8199-07ee218c7e87": {"__data__": {"id_": "79117bf7-9333-4a0b-8199-07ee218c7e87", "embedding": null, "metadata": {"window": "One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. 
Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. ", "original_text": "Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a123fbb6-9e49-4f80-8ce3-8531619a2e99", "node_type": "1", "metadata": {"window": "While strong data\naugmentation operations have been helpful in mitigating the data shift, there is still a drop in\nspecies classification accuracy of about 10% from GBIF test images to trap images (a very small\nset of such images was labeled by experts). One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. ", "original_text": "In this context, active learning appears as a very attractive\nfield of research.\n"}, "hash": "01495c9c8c894ac06e53f818764074e9ae8579869981fe89100da5180b16c9a0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b5e7937a-96c8-41db-b598-53e6ff7a6646", "node_type": "1", "metadata": {"window": "Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. 
There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. ", "original_text": "Hence, it is about making the most of\nthe human annotators\u2019 work. "}, "hash": "7e03f52fc4b349cb56e64735d2dc1794d3366806a151c4d563bda838897ff1ee", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. ", "mimetype": "text/plain", "start_char_idx": 36155, "end_char_idx": 36387, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b5e7937a-96c8-41db-b598-53e6ff7a6646": {"__data__": {"id_": "b5e7937a-96c8-41db-b598-53e6ff7a6646", "embedding": null, "metadata": {"window": "Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. ", "original_text": "Hence, it is about making the most of\nthe human annotators\u2019 work. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "79117bf7-9333-4a0b-8199-07ee218c7e87", "node_type": "1", "metadata": {"window": "One way to tackle the problem would be to in-\ncorporate trap images to the training set. Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. 
In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. ", "original_text": "Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. "}, "hash": "2b0df8df8fd9b1d3ef02091e64a50722b9a9b64b1fafe63a6488a54c3b8df2eb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "331f3848-841d-4b98-afc4-6a3da91974a6", "node_type": "1", "metadata": {"window": "5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. ", "original_text": "The typical workflow is depicted in algorithm 1. "}, "hash": "6a0b285c6bf3e06fd62ddc5f19cd05594951e4b0907bd02cb3afad9fcdca03f8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Hence, it is about making the most of\nthe human annotators\u2019 work. ", "mimetype": "text/plain", "start_char_idx": 36387, "end_char_idx": 36453, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "331f3848-841d-4b98-afc4-6a3da91974a6": {"__data__": {"id_": "331f3848-841d-4b98-afc4-6a3da91974a6", "embedding": null, "metadata": {"window": "5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. 
The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. ", "original_text": "The typical workflow is depicted in algorithm 1. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b5e7937a-96c8-41db-b598-53e6ff7a6646", "node_type": "1", "metadata": {"window": "Even a relatively small number of these images (e.g.\n 5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. ", "original_text": "Hence, it is about making the most of\nthe human annotators\u2019 work. "}, "hash": "7e03f52fc4b349cb56e64735d2dc1794d3366806a151c4d563bda838897ff1ee", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a89816fc-81e8-4d35-bb2f-a2c436d05c16", "node_type": "1", "metadata": {"window": "In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n", "original_text": "In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. "}, "hash": "6f0a36bd54312b4239613a5d98369e531caea6ee2b4304444cca9e4c58d0b710", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The typical workflow is depicted in algorithm 1. 
", "mimetype": "text/plain", "start_char_idx": 36453, "end_char_idx": 36502, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a89816fc-81e8-4d35-bb2f-a2c436d05c16": {"__data__": {"id_": "a89816fc-81e8-4d35-bb2f-a2c436d05c16", "embedding": null, "metadata": {"window": "In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n", "original_text": "In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "331f3848-841d-4b98-afc4-6a3da91974a6", "node_type": "1", "metadata": {"window": "5% of the training set) is expected to significantly reduce the accuracy drop, and with more\nand more entomologists becoming interested in the project, the necessary work force to label\nthe images could become available. In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. ", "original_text": "The typical workflow is depicted in algorithm 1. "}, "hash": "6a0b285c6bf3e06fd62ddc5f19cd05594951e4b0907bd02cb3afad9fcdca03f8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ae5c0bec-4778-429c-a77f-e3f6bce7963c", "node_type": "1", "metadata": {"window": "Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. 
Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. ", "original_text": "There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. "}, "hash": "011eedb5904c7f2bfcc2ee4efff94451bdfd6afc22d4eb1bc8429f27effb92a7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. ", "mimetype": "text/plain", "start_char_idx": 36502, "end_char_idx": 36652, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ae5c0bec-4778-429c-a77f-e3f6bce7963c": {"__data__": {"id_": "ae5c0bec-4778-429c-a77f-e3f6bce7963c", "embedding": null, "metadata": {"window": "Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. ", "original_text": "There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a89816fc-81e8-4d35-bb2f-a2c436d05c16", "node_type": "1", "metadata": {"window": "In this context, active learning appears as a very attractive\nfield of research.\n Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. 
There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n", "original_text": "In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. "}, "hash": "6f0a36bd54312b4239613a5d98369e531caea6ee2b4304444cca9e4c58d0b710", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b8c2029a-08a8-4292-bb9e-381318b954b1", "node_type": "1", "metadata": {"window": "Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n", "original_text": "The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. "}, "hash": "ecf7f8bbc55e974969068712736c53eb456f8e1687c3372c43a7431ddabb9048", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. ", "mimetype": "text/plain", "start_char_idx": 36652, "end_char_idx": 36745, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b8c2029a-08a8-4292-bb9e-381318b954b1": {"__data__": {"id_": "b8c2029a-08a8-4292-bb9e-381318b954b1", "embedding": null, "metadata": {"window": "Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n", "original_text": "The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ae5c0bec-4778-429c-a77f-e3f6bce7963c", "node_type": "1", "metadata": {"window": "Active learning (AL) encompasses all the techniques that aim to maximize the performance\nimprovement of a model upon labeling of new samples and consequent addition to the train-\ning dataset, by selecting the most valuable samples. Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. ", "original_text": "There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. "}, "hash": "011eedb5904c7f2bfcc2ee4efff94451bdfd6afc22d4eb1bc8429f27effb92a7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cbe33f32-cb4c-430a-8e25-2833f8866afe", "node_type": "1", "metadata": {"window": "The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n", "original_text": "The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. "}, "hash": "2bb806683ba31fb9968242e8f92fa954508346588a5e0a17eda616854a717d3c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. 
", "mimetype": "text/plain", "start_char_idx": 36745, "end_char_idx": 36861, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cbe33f32-cb4c-430a-8e25-2833f8866afe": {"__data__": {"id_": "cbe33f32-cb4c-430a-8e25-2833f8866afe", "embedding": null, "metadata": {"window": "The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n", "original_text": "The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b8c2029a-08a8-4292-bb9e-381318b954b1", "node_type": "1", "metadata": {"window": "Hence, it is about making the most of\nthe human annotators\u2019 work. The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n", "original_text": "The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. "}, "hash": "ecf7f8bbc55e974969068712736c53eb456f8e1687c3372c43a7431ddabb9048", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fe114e0c-bcc9-490a-ab09-add26ddb6382", "node_type": "1", "metadata": {"window": "In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. 
The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. ", "original_text": "[29]).\n"}, "hash": "92836b0980fbf3094f4804dc18ede7f12282825b2fdbb2edf4530c4aa43b0af3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. ", "mimetype": "text/plain", "start_char_idx": 36861, "end_char_idx": 36972, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fe114e0c-bcc9-490a-ab09-add26ddb6382": {"__data__": {"id_": "fe114e0c-bcc9-490a-ab09-add26ddb6382", "embedding": null, "metadata": {"window": "In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. 
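[Editor's note] The node text above records the pool-based active learning workflow of Algorithm 1 (train on L, represent the pool U, query the top-K informative samples, annotate, update L and U). The following is a minimal, hedged Python sketch of that loop; every callable argument (train, represent, score, annotate) is a hypothetical stand-in for project-specific components that the recorded text does not define.

# Sketch of the pool-based active learning loop described in Algorithm 1.
# All callables below are assumed placeholders, not APIs from the source project.
from typing import Callable, Dict, List, Sequence, Tuple

def pool_based_active_learning(
    labeled: Dict[str, int],                              # L: sample id -> label
    unlabeled: List[str],                                  # U: unlabeled pool
    train: Callable[[Dict[str, int]], object],             # fits model M on L
    represent: Callable[[object, Sequence[str]], list],    # R = M(x) for x in U
    score: Callable[[list], List[float]],                  # informativeness per sample
    annotate: Callable[[Sequence[str]], Dict[str, int]],   # human annotators A
    k: int = 100,
    rounds: int = 5,                                       # stand-in end condition
) -> Tuple[object, Dict[str, int]]:
    model = None
    for _ in range(rounds):
        model = train(labeled)                             # 2: train M with L
        reps = represent(model, unlabeled)                 # 3: representations R of U
        scores = score(reps)                               # basis for the selection strategy
        ranked = sorted(zip(unlabeled, scores), key=lambda p: p[1], reverse=True)
        query = [sid for sid, _ in ranked[:k]]             # 4: top-K informative samples K
        labels = annotate(query)                           # 5: Y_K = A(K)
        labeled.update(labels)                             # 6: L = L ∪ {K, Y_K}
        unlabeled = [sid for sid in unlabeled if sid not in labels]  # U = U \ K
    return model, labeled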
", "original_text": "[29]).\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cbe33f32-cb4c-430a-8e25-2833f8866afe", "node_type": "1", "metadata": {"window": "The typical workflow is depicted in algorithm 1. In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n", "original_text": "The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. "}, "hash": "2bb806683ba31fb9968242e8f92fa954508346588a5e0a17eda616854a717d3c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dc7db30d-7fd8-4df3-866e-da87e169ba26", "node_type": "1", "metadata": {"window": "There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. ", "original_text": "Finally, there are also techniques that combine both approaches. 
"}, "hash": "2745e10dce0578d1db851cb2589e7c71f8bdcd07f3bbb7f7e76fdfa31c6395cd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[29]).\n", "mimetype": "text/plain", "start_char_idx": 36972, "end_char_idx": 36979, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dc7db30d-7fd8-4df3-866e-da87e169ba26": {"__data__": {"id_": "dc7db30d-7fd8-4df3-866e-da87e169ba26", "embedding": null, "metadata": {"window": "There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. ", "original_text": "Finally, there are also techniques that combine both approaches. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fe114e0c-bcc9-490a-ab09-add26ddb6382", "node_type": "1", "metadata": {"window": "In the past\ndecade, with the emergence of data-hungry deep learning, AL has attracted renewed interest,\nand plenty of techniques have been developed. There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. 
For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. ", "original_text": "[29]).\n"}, "hash": "92836b0980fbf3094f4804dc18ede7f12282825b2fdbb2edf4530c4aa43b0af3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b34b9390-01c5-4c2d-8183-d8ebd67ece64", "node_type": "1", "metadata": {"window": "The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. ", "original_text": "For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n"}, "hash": "df793aea954b8e058d55b97bb122ff74451c2439f35f6cc5163886ac08b19298", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Finally, there are also techniques that combine both approaches. 
", "mimetype": "text/plain", "start_char_idx": 36979, "end_char_idx": 37044, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b34b9390-01c5-4c2d-8183-d8ebd67ece64": {"__data__": {"id_": "b34b9390-01c5-4c2d-8183-d8ebd67ece64", "embedding": null, "metadata": {"window": "The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. ", "original_text": "For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dc7db30d-7fd8-4df3-866e-da87e169ba26", "node_type": "1", "metadata": {"window": "There are two main categories: uncertainty-\nbased techniques and diversity-based techniques. The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. 
For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. ", "original_text": "Finally, there are also techniques that combine both approaches. "}, "hash": "2745e10dce0578d1db851cb2589e7c71f8bdcd07f3bbb7f7e76fdfa31c6395cd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2fa7d627-bb0d-4614-9edd-00136a8223cd", "node_type": "1", "metadata": {"window": "The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. 
the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n", "original_text": "19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n"}, "hash": "265354bd22f10ff1612e6f6c9e216dd1f6009c63cae45cdecd2d56385721e4dd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n", "mimetype": "text/plain", "start_char_idx": 37044, "end_char_idx": 37212, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2fa7d627-bb0d-4614-9edd-00136a8223cd": {"__data__": {"id_": "2fa7d627-bb0d-4614-9edd-00136a8223cd", "embedding": null, "metadata": {"window": "The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. 
the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n", "original_text": "19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b34b9390-01c5-4c2d-8183-d8ebd67ece64", "node_type": "1", "metadata": {"window": "The previous try to find images the model\nisn\u2019t certain about, as a proxy of the model being wrong about the image. The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. ", "original_text": "For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n"}, "hash": "df793aea954b8e058d55b97bb122ff74451c2439f35f6cc5163886ac08b19298", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "772f82ad-0231-4d09-b381-dc5d72d95e00", "node_type": "1", "metadata": {"window": "[29]).\n Finally, there are also techniques that combine both approaches. 
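[Editor's note] The recorded text states that predictive uncertainty for a query x* decomposes into aleatoric and epistemic parts but does not give a formula. One common entropy-based formalization (an assumption on the editor's part, using an expectation over model parameters θ, e.g. an ensemble or MC dropout) is:

\underbrace{\mathcal{H}\!\left[\,\mathbb{E}_{\theta}\, p(y \mid x^{*}, \theta)\,\right]}_{\text{total predictive uncertainty}}
\;=\;
\underbrace{\mathbb{E}_{\theta}\,\mathcal{H}\!\left[\, p(y \mid x^{*}, \theta)\,\right]}_{\text{aleatoric}}
\;+\;
\underbrace{\mathcal{I}\!\left(y;\,\theta \mid x^{*}\right)}_{\text{epistemic}}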
For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. ", "original_text": "From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. "}, "hash": "573ab21180b3d432b6d8c38aff9b947f8fbf96dd27ee31c4e3af815ad8c27ffe", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n", "mimetype": "text/plain", "start_char_idx": 37212, "end_char_idx": 37517, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "772f82ad-0231-4d09-b381-dc5d72d95e00": {"__data__": {"id_": "772f82ad-0231-4d09-b381-dc5d72d95e00", "embedding": null, "metadata": {"window": "[29]).\n Finally, there are also techniques that combine both approaches. 
For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. ", "original_text": "From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2fa7d627-bb0d-4614-9edd-00136a8223cd", "node_type": "1", "metadata": {"window": "The latter try to find\nimages that best represent the diversity existing in the pool of unlabeled images (e.g. [29]).\n Finally, there are also techniques that combine both approaches. 
For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n", "original_text": "19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n"}, "hash": "265354bd22f10ff1612e6f6c9e216dd1f6009c63cae45cdecd2d56385721e4dd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a32bb209-d3ab-4683-ac5a-4a50097bd8fb", "node_type": "1", "metadata": {"window": "Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. 
the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n", "original_text": "M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. "}, "hash": "09cae7bbd1fe02268f214cd09f29e6bf72ce7f74489f14888574ed327888b230", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. ", "mimetype": "text/plain", "start_char_idx": 37517, "end_char_idx": 37976, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a32bb209-d3ab-4683-ac5a-4a50097bd8fb": {"__data__": {"id_": "a32bb209-d3ab-4683-ac5a-4a50097bd8fb", "embedding": null, "metadata": {"window": "Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. 
In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n", "original_text": "M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "772f82ad-0231-4d09-b381-dc5d72d95e00", "node_type": "1", "metadata": {"window": "[29]).\n Finally, there are also techniques that combine both approaches. For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. ", "original_text": "From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. 
"}, "hash": "573ab21180b3d432b6d8c38aff9b947f8fbf96dd27ee31c4e3af815ad8c27ffe", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d2dfc24e-c6f6-4b85-a15d-244cf347e221", "node_type": "1", "metadata": {"window": "For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). ", "original_text": "To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. "}, "hash": "b93d60bb9430beb40e724f0311640c890d628b1f5b05de08a36aa63cc68a5b20", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. 
", "mimetype": "text/plain", "start_char_idx": 37976, "end_char_idx": 38137, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d2dfc24e-c6f6-4b85-a15d-244cf347e221": {"__data__": {"id_": "d2dfc24e-c6f6-4b85-a15d-244cf347e221", "embedding": null, "metadata": {"window": "For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). ", "original_text": "To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a32bb209-d3ab-4683-ac5a-4a50097bd8fb", "node_type": "1", "metadata": {"window": "Finally, there are also techniques that combine both approaches. 
For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n", "original_text": "M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. "}, "hash": "09cae7bbd1fe02268f214cd09f29e6bf72ce7f74489f14888574ed327888b230", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "79ad8d9f-5a09-45a6-96df-6f80d24405bb", "node_type": "1", "metadata": {"window": "19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. 
the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. ", "original_text": "the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n"}, "hash": "5732e9a0c2cce116dafa7ebbffc2cec1e82fb53a97d7fc0698ce154c38388ef9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. ", "mimetype": "text/plain", "start_char_idx": 38137, "end_char_idx": 38288, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "79ad8d9f-5a09-45a6-96df-6f80d24405bb": {"__data__": {"id_": "79ad8d9f-5a09-45a6-96df-6f80d24405bb", "embedding": null, "metadata": {"window": "19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. 
", "original_text": "the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d2dfc24e-c6f6-4b85-a15d-244cf347e221", "node_type": "1", "metadata": {"window": "For an initial exploration of\nthe topic under strict time constraints, precedence was given to the more easily implementable\nand scalable uncertainty-based techniques.\n 19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). ", "original_text": "To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. "}, "hash": "b93d60bb9430beb40e724f0311640c890d628b1f5b05de08a36aa63cc68a5b20", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5677f329-585c-4409-bcaa-e3ddf3f21d72", "node_type": "1", "metadata": {"window": "From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. 
To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). ", "original_text": "Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. "}, "hash": "99f7f11ca0a033706145b09eb35bd463713e36cdbf445dd88f042bacc9bddcc2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n", "mimetype": "text/plain", "start_char_idx": 38288, "end_char_idx": 38479, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5677f329-585c-4409-bcaa-e3ddf3f21d72": {"__data__": {"id_": "5677f329-585c-4409-bcaa-e3ddf3f21d72", "embedding": null, "metadata": {"window": "From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). ", "original_text": "Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "79ad8d9f-5a09-45a6-96df-6f80d24405bb", "node_type": "1", "metadata": {"window": "19\n\n(a) Eudeilinia herminiata\n (b) Acronicta insita\n (c) Thysania zenobia\n(d) Syngrapha rectangula\n (e) Cosmia calami\nFigure 3.1: Images from GBIF\nFigure 3.2: Images from moth traps, following object detection\n20\n\nAlgorithm 1The pool-based active learning workflow, which is most common in deep learning.\n From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. ", "original_text": "the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n"}, "hash": "5732e9a0c2cce116dafa7ebbffc2cec1e82fb53a97d7fc0698ce154c38388ef9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8631d5a8-8c11-4dd5-b7c2-e3610a5e2c1d", "node_type": "1", "metadata": {"window": "M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). 
Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. ", "original_text": "In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n"}, "hash": "f2f5deedcfaec54be60a7d070ae7f362a4639dcf096b1cd73e1fd2048d24f0ff", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. ", "mimetype": "text/plain", "start_char_idx": 38479, "end_char_idx": 38641, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8631d5a8-8c11-4dd5-b7c2-e3610a5e2c1d": {"__data__": {"id_": "8631d5a8-8c11-4dd5-b7c2-e3610a5e2c1d", "embedding": null, "metadata": {"window": "M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. ", "original_text": "In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5677f329-585c-4409-bcaa-e3ddf3f21d72", "node_type": "1", "metadata": {"window": "From [30]\nInput :Initial training dataset L, unlabeled data pool U, annotators A\nOutput: A well-trained model M with least labeling cost\n1: while End condition isn\u2019t met do\n2: Train the model M with L\n3: Obtain the representation R of all samples x \u2208 U, R = M(x)\n4: Query the top-K informative samples K via selection strategies, based on R\n5: Annotate the samples K and obtain the labels YK = A(K)\n6: Update L = L \u222a {K, YK}, update U = U/K\n7: end while\n3.1. M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. 
To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). ", "original_text": "Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. "}, "hash": "99f7f11ca0a033706145b09eb35bd463713e36cdbf445dd88f042bacc9bddcc2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f9efa154-016b-4c2a-b511-5a84ec784152", "node_type": "1", "metadata": {"window": "To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. ", "original_text": "if the image is blurred or occluded). "}, "hash": "e02b9e43eedd14dc8b35b8028eb55719225213ee687bb5977d31db61e14e6163", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n", "mimetype": "text/plain", "start_char_idx": 38641, "end_char_idx": 38768, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f9efa154-016b-4c2a-b511-5a84ec784152": {"__data__": {"id_": "f9efa154-016b-4c2a-b511-5a84ec784152", "embedding": null, "metadata": {"window": "To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. 
In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. ", "original_text": "if the image is blurred or occluded). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8631d5a8-8c11-4dd5-b7c2-e3610a5e2c1d", "node_type": "1", "metadata": {"window": "M EASURES OF UNCERTAINTY\nNeural networks predictions are notoriously unreliable when the input sample is out of the\ntraining distribution or corrupted by noise. To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. ", "original_text": "In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n"}, "hash": "f2f5deedcfaec54be60a7d070ae7f362a4639dcf096b1cd73e1fd2048d24f0ff", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d41b5648-bf48-4c47-9696-1cbaaa778a5c", "node_type": "1", "metadata": {"window": "the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. 
In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n", "original_text": "Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. "}, "hash": "1264731494ff3b332ff9d3e737ca66d75b19d6c837ba0b292729793a3c6c0957", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "if the image is blurred or occluded). ", "mimetype": "text/plain", "start_char_idx": 38768, "end_char_idx": 38806, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d41b5648-bf48-4c47-9696-1cbaaa778a5c": {"__data__": {"id_": "d41b5648-bf48-4c47-9696-1cbaaa778a5c", "embedding": null, "metadata": {"window": "the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n", "original_text": "Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f9efa154-016b-4c2a-b511-5a84ec784152", "node_type": "1", "metadata": {"window": "To effectively apply active learning techniques in\ndeep learning frameworks, better estimations of uncertainty are needed.Predictive uncertainty,\ni.e. the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. ", "original_text": "if the image is blurred or occluded). 
"}, "hash": "e02b9e43eedd14dc8b35b8028eb55719225213ee687bb5977d31db61e14e6163", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d54b9520-11fb-4782-93f7-3fc906b24ff1", "node_type": "1", "metadata": {"window": "Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). ", "original_text": "a deep learning model). "}, "hash": "e8a43107db65aae2f051f262a441537212bee7f2972c44e5811e9917db7d1b41", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. ", "mimetype": "text/plain", "start_char_idx": 38806, "end_char_idx": 38929, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d54b9520-11fb-4782-93f7-3fc906b24ff1": {"__data__": {"id_": "d54b9520-11fb-4782-93f7-3fc906b24ff1", "embedding": null, "metadata": {"window": "Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). ", "original_text": "a deep learning model). 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d41b5648-bf48-4c47-9696-1cbaaa778a5c", "node_type": "1", "metadata": {"window": "the uncertainty related to the prediction \u02c6y(x\u2217) for a concrete query instance x\u2217, can be de-\ncomposed into two distinct types of uncertainty: aleatoric uncertaintyand epistemic uncertainty.\n Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n", "original_text": "Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. "}, "hash": "1264731494ff3b332ff9d3e737ca66d75b19d6c837ba0b292729793a3c6c0957", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7aafcfc8-3cb8-47ee-ad29-1a1568015e8f", "node_type": "1", "metadata": {"window": "In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. ", "original_text": "As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. "}, "hash": "df013d395c448a2469b425c1b81ee3dd7824aac3096d511d7a7e19606ac59db0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "a deep learning model). ", "mimetype": "text/plain", "start_char_idx": 38929, "end_char_idx": 38953, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7aafcfc8-3cb8-47ee-ad29-1a1568015e8f": {"__data__": {"id_": "7aafcfc8-3cb8-47ee-ad29-1a1568015e8f", "embedding": null, "metadata": {"window": "In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). 
Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. ", "original_text": "As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d54b9520-11fb-4782-93f7-3fc906b24ff1", "node_type": "1", "metadata": {"window": "Generally, the previous refers to the notion of randomness, that is, the variability in the out-\ncome of an experiment which is due to inherently random effects. In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). ", "original_text": "a deep learning model). "}, "hash": "e8a43107db65aae2f051f262a441537212bee7f2972c44e5811e9917db7d1b41", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6dc89e6e-7f79-4749-8ae9-16dfbc06ad5c", "node_type": "1", "metadata": {"window": "if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. ", "original_text": "This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. 
"}, "hash": "3c656eb8c26a79f104aa469174b32fc60a319a4effbf29b9720ab7172cb08871", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. ", "mimetype": "text/plain", "start_char_idx": 38953, "end_char_idx": 39082, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6dc89e6e-7f79-4749-8ae9-16dfbc06ad5c": {"__data__": {"id_": "6dc89e6e-7f79-4749-8ae9-16dfbc06ad5c", "embedding": null, "metadata": {"window": "if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. ", "original_text": "This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7aafcfc8-3cb8-47ee-ad29-1a1568015e8f", "node_type": "1", "metadata": {"window": "In the context of image\nclassification, aleatoric uncertainty relates to the inherent difficulty of classifying an image (e.g.\n if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. ", "original_text": "As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. "}, "hash": "df013d395c448a2469b425c1b81ee3dd7824aac3096d511d7a7e19606ac59db0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "63ff8cd4-5ad4-4cdd-955a-fb227c0a83a6", "node_type": "1", "metadata": {"window": "Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. 
This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. ", "original_text": "In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n"}, "hash": "2e63d758b0d820fec147fb7a4e736939893b2a9be5739d712a47cd059bd8dcf9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. ", "mimetype": "text/plain", "start_char_idx": 39082, "end_char_idx": 39170, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "63ff8cd4-5ad4-4cdd-955a-fb227c0a83a6": {"__data__": {"id_": "63ff8cd4-5ad4-4cdd-955a-fb227c0a83a6", "embedding": null, "metadata": {"window": "Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. ", "original_text": "In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6dc89e6e-7f79-4749-8ae9-16dfbc06ad5c", "node_type": "1", "metadata": {"window": "if the image is blurred or occluded). Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. 
In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. ", "original_text": "This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. "}, "hash": "3c656eb8c26a79f104aa469174b32fc60a319a4effbf29b9720ab7172cb08871", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c9566a54-0a82-4e1c-bdd0-17619158433e", "node_type": "1", "metadata": {"window": "a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n", "original_text": "A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). "}, "hash": "27f4b3ef7f9b781c0297f991f74c619666b13727ac680ca89a21083c5f84f37b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n", "mimetype": "text/plain", "start_char_idx": 39170, "end_char_idx": 39267, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c9566a54-0a82-4e1c-bdd0-17619158433e": {"__data__": {"id_": "c9566a54-0a82-4e1c-bdd0-17619158433e", "embedding": null, "metadata": {"window": "a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. 
Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n", "original_text": "A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "63ff8cd4-5ad4-4cdd-955a-fb227c0a83a6", "node_type": "1", "metadata": {"window": "Epistemic uncertainty refers to uncertainty caused by a\nlack of knowledge, i.e., to the epistemic state of the agent (e.g. a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. ", "original_text": "In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n"}, "hash": "2e63d758b0d820fec147fb7a4e736939893b2a9be5739d712a47cd059bd8dcf9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d7eaba30-e1fe-4de3-808d-26f7d1d2f3d6", "node_type": "1", "metadata": {"window": "As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. ", "original_text": "Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. 
"}, "hash": "1cd84cbd8227297fcd887baaa3dbecca3005efb65f78f635003d04d08d2e685c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). ", "mimetype": "text/plain", "start_char_idx": 39267, "end_char_idx": 39398, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d7eaba30-e1fe-4de3-808d-26f7d1d2f3d6": {"__data__": {"id_": "d7eaba30-e1fe-4de3-808d-26f7d1d2f3d6", "embedding": null, "metadata": {"window": "As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. ", "original_text": "Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c9566a54-0a82-4e1c-bdd0-17619158433e", "node_type": "1", "metadata": {"window": "a deep learning model). As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. 
Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n", "original_text": "A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). "}, "hash": "27f4b3ef7f9b781c0297f991f74c619666b13727ac680ca89a21083c5f84f37b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dd6a23d6-f218-4c4e-ab2f-45a34db82c7e", "node_type": "1", "metadata": {"window": "This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. ", "original_text": "Instead, it can be done if multiple models are available, by looking at model\ndisagreement. "}, "hash": "b6b90412e2167fa2015b48b0a3dd1c77edeef2bf963a57da63aebfef67443c08", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. ", "mimetype": "text/plain", "start_char_idx": 39398, "end_char_idx": 39476, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dd6a23d6-f218-4c4e-ab2f-45a34db82c7e": {"__data__": {"id_": "dd6a23d6-f218-4c4e-ab2f-45a34db82c7e", "embedding": null, "metadata": {"window": "This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. 
Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. ", "original_text": "Instead, it can be done if multiple models are available, by looking at model\ndisagreement. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d7eaba30-e1fe-4de3-808d-26f7d1d2f3d6", "node_type": "1", "metadata": {"window": "As op-\nposed to aleatoric uncertainty, epistemic uncertainty can in principle be reduced on the basis of\nadditional information. This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. ", "original_text": "Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. "}, "hash": "1cd84cbd8227297fcd887baaa3dbecca3005efb65f78f635003d04d08d2e685c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1355a4e1-6c67-44b4-9b38-6655fd46ee2d", "node_type": "1", "metadata": {"window": "In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. 
Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. ", "original_text": "Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. "}, "hash": "a405b2888b1362dcd11f2c93badd5817e12c72a3a054051b8f7b4acdb9e92332", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Instead, it can be done if multiple models are available, by looking at model\ndisagreement. ", "mimetype": "text/plain", "start_char_idx": 39476, "end_char_idx": 39568, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1355a4e1-6c67-44b4-9b38-6655fd46ee2d": {"__data__": {"id_": "1355a4e1-6c67-44b4-9b38-6655fd46ee2d", "embedding": null, "metadata": {"window": "In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. ", "original_text": "Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dd6a23d6-f218-4c4e-ab2f-45a34db82c7e", "node_type": "1", "metadata": {"window": "This should precisely be the goal of uncertainty-based active learning\ntechniques [31]. In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. ", "original_text": "Instead, it can be done if multiple models are available, by looking at model\ndisagreement. "}, "hash": "b6b90412e2167fa2015b48b0a3dd1c77edeef2bf963a57da63aebfef67443c08", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cb36c6b2-6823-4b87-b4d6-4d3f536b354e", "node_type": "1", "metadata": {"window": "A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. 
", "original_text": "Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n"}, "hash": "740c74cf12c91d1fb19a716080baa9779624998f07e3929f5a35dca9a339bbc3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. ", "mimetype": "text/plain", "start_char_idx": 39568, "end_char_idx": 39758, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cb36c6b2-6823-4b87-b4d6-4d3f536b354e": {"__data__": {"id_": "cb36c6b2-6823-4b87-b4d6-4d3f536b354e", "embedding": null, "metadata": {"window": "A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. ", "original_text": "Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1355a4e1-6c67-44b4-9b38-6655fd46ee2d", "node_type": "1", "metadata": {"window": "In other words, the goal is to find images that the model should be able to\nclassify, but can\u2019t.\n A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. 
Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. ", "original_text": "Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. "}, "hash": "a405b2888b1362dcd11f2c93badd5817e12c72a3a054051b8f7b4acdb9e92332", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2bf501ac-e516-4c53-85fc-fa976cc093c4", "node_type": "1", "metadata": {"window": "Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). ", "original_text": "Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. 
"}, "hash": "d30d257c07c3c7924982784527c37bf707590377d3581c1d3612e950e1203522", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n", "mimetype": "text/plain", "start_char_idx": 39758, "end_char_idx": 39930, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2bf501ac-e516-4c53-85fc-fa976cc093c4": {"__data__": {"id_": "2bf501ac-e516-4c53-85fc-fa976cc093c4", "embedding": null, "metadata": {"window": "Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). ", "original_text": "Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cb36c6b2-6823-4b87-b4d6-4d3f536b354e", "node_type": "1", "metadata": {"window": "A classic deep learning model typically provides an estimate of aleatoric uncertainty (which,\nas stated above, can be unreliable). Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. 
Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. ", "original_text": "Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n"}, "hash": "740c74cf12c91d1fb19a716080baa9779624998f07e3929f5a35dca9a339bbc3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "77d837d8-7954-41d2-adb5-48465d6ef4cb", "node_type": "1", "metadata": {"window": "Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). ", "original_text": "A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. "}, "hash": "76b1f17adbb181468c57e7914f6491b8bc3fd23bac9aa50d345cb61041ec058f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. 
", "mimetype": "text/plain", "start_char_idx": 39930, "end_char_idx": 40170, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "77d837d8-7954-41d2-adb5-48465d6ef4cb": {"__data__": {"id_": "77d837d8-7954-41d2-adb5-48465d6ef4cb", "embedding": null, "metadata": {"window": "Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). ", "original_text": "A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2bf501ac-e516-4c53-85fc-fa976cc093c4", "node_type": "1", "metadata": {"window": "Individually, such a model can\u2019t be used to estimate epis-\ntemic uncertainty. Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. 
The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). ", "original_text": "Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. "}, "hash": "d30d257c07c3c7924982784527c37bf707590377d3581c1d3612e950e1203522", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "63715421-8ce4-4bfe-b926-4ee480c97509", "node_type": "1", "metadata": {"window": "Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. ", "original_text": "Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. "}, "hash": "44912f1ac8a44295833982f5504bcbd0500018f021988fce8821c0f22d2f0e5c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. ", "mimetype": "text/plain", "start_char_idx": 40170, "end_char_idx": 40320, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "63715421-8ce4-4bfe-b926-4ee480c97509": {"__data__": {"id_": "63715421-8ce4-4bfe-b926-4ee480c97509", "embedding": null, "metadata": {"window": "Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. 
Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. ", "original_text": "Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "77d837d8-7954-41d2-adb5-48465d6ef4cb", "node_type": "1", "metadata": {"window": "Instead, it can be done if multiple models are available, by looking at model\ndisagreement. Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). ", "original_text": "A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. 
"}, "hash": "76b1f17adbb181468c57e7914f6491b8bc3fd23bac9aa50d345cb61041ec058f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4027d04e-7150-45ef-bc0f-ca04bc17db98", "node_type": "1", "metadata": {"window": "Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n", "original_text": "The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. "}, "hash": "b6aa9db6555d30b8620211dce655ee17e5b443a6411db8cd83a331a49bccd3ac", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. ", "mimetype": "text/plain", "start_char_idx": 40320, "end_char_idx": 40478, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4027d04e-7150-45ef-bc0f-ca04bc17db98": {"__data__": {"id_": "4027d04e-7150-45ef-bc0f-ca04bc17db98", "embedding": null, "metadata": {"window": "Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). 
The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n", "original_text": "The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "63715421-8ce4-4bfe-b926-4ee480c97509", "node_type": "1", "metadata": {"window": "Groups of models are called ensembles, and they are commonly obtained by\nrunning multiple trainings: each training is a random process that results in (slightly) different\nmodel parameters. Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. ", "original_text": "Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. "}, "hash": "44912f1ac8a44295833982f5504bcbd0500018f021988fce8821c0f22d2f0e5c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "194396d4-069d-4aa5-b5b9-568268f53966", "node_type": "1", "metadata": {"window": "Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. 
The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n", "original_text": "H(p) \u2264 log(|K|). "}, "hash": "700f28fb9f11e85d8ac657d59dbf437e8f0b12f72259e5b7a73a653f13a35395", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. ", "mimetype": "text/plain", "start_char_idx": 40478, "end_char_idx": 40701, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "194396d4-069d-4aa5-b5b9-568268f53966": {"__data__": {"id_": "194396d4-069d-4aa5-b5b9-568268f53966", "embedding": null, "metadata": {"window": "Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n", "original_text": "H(p) \u2264 log(|K|). 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4027d04e-7150-45ef-bc0f-ca04bc17db98", "node_type": "1", "metadata": {"window": "Intuitively, if two models disagree on a sample, at least one of them must\nbe wrong, while the other might be right, suggesting that it is possible to classify the sample.\n Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n", "original_text": "The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. "}, "hash": "b6aa9db6555d30b8620211dce655ee17e5b443a6411db8cd83a331a49bccd3ac", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b9a7dba5-c48f-40d1-986b-fb8ea4eeea9f", "node_type": "1", "metadata": {"window": "A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. 
This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. ", "original_text": "The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). "}, "hash": "5dc3c8492cea24344a55ce160b2c9d79d1ef03e5fcbe61e1da2910043b8a7497", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "H(p) \u2264 log(|K|). ", "mimetype": "text/plain", "start_char_idx": 40701, "end_char_idx": 40718, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b9a7dba5-c48f-40d1-986b-fb8ea4eeea9f": {"__data__": {"id_": "b9a7dba5-c48f-40d1-986b-fb8ea4eeea9f", "embedding": null, "metadata": {"window": "A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. ", "original_text": "The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "194396d4-069d-4aa5-b5b9-568268f53966", "node_type": "1", "metadata": {"window": "Another way to look at it is that models are more likely to disagree on samples that are out-\nof-distribution, because in these \"regions\" the decision boundary and the feature space itself\nare more likely to evolve differently at training. A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. 
Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n", "original_text": "H(p) \u2264 log(|K|). "}, "hash": "700f28fb9f11e85d8ac657d59dbf437e8f0b12f72259e5b7a73a653f13a35395", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b943049b-79bf-44ea-910c-84c6830e1858", "node_type": "1", "metadata": {"window": "Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. ", "original_text": "However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. "}, "hash": "f77c12b4c4fe937389d935662d593837447a799c05b2a53507a891dbd1974705", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). ", "mimetype": "text/plain", "start_char_idx": 40718, "end_char_idx": 40872, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b943049b-79bf-44ea-910c-84c6830e1858": {"__data__": {"id_": "b943049b-79bf-44ea-910c-84c6830e1858", "embedding": null, "metadata": {"window": "Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. 
The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. ", "original_text": "However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b9a7dba5-c48f-40d1-986b-fb8ea4eeea9f", "node_type": "1", "metadata": {"window": "A common estimator for epistemic uncertainty\nin the context of ensembles is the Jensen-Shannon Divergence, most frequently called Mutual\nInformation. Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. ", "original_text": "The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). 
"}, "hash": "5dc3c8492cea24344a55ce160b2c9d79d1ef03e5fcbe61e1da2910043b8a7497", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "14a323dc-0a44-44be-8bc9-98a41d5b6a7f", "node_type": "1", "metadata": {"window": "The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. ", "original_text": "This shows that mutual information is higher for samples with high disagreement.\n"}, "hash": "8bcbe11cad3df833bf7cd167edf3fc663502ce46f0bc303bea013de23e67a4f0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. ", "mimetype": "text/plain", "start_char_idx": 40872, "end_char_idx": 40993, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "14a323dc-0a44-44be-8bc9-98a41d5b6a7f": {"__data__": {"id_": "14a323dc-0a44-44be-8bc9-98a41d5b6a7f", "embedding": null, "metadata": {"window": "The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. 
", "original_text": "This shows that mutual information is higher for samples with high disagreement.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b943049b-79bf-44ea-910c-84c6830e1858", "node_type": "1", "metadata": {"window": "Given the vectors of predicted probabilities p(e) over the set of classes K for each\nmodel e in the ensemble E, the average probability vector p is computed. The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. ", "original_text": "However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. "}, "hash": "f77c12b4c4fe937389d935662d593837447a799c05b2a53507a891dbd1974705", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f65e451d-bfbb-4d35-9a0b-5b9cccba1f0d", "node_type": "1", "metadata": {"window": "H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. 
", "original_text": "An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n"}, "hash": "ff953b96b1d7e297fa17be8588ef14a10706ace9bff8ffa1993b5c774be00ebf", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "This shows that mutual information is higher for samples with high disagreement.\n", "mimetype": "text/plain", "start_char_idx": 40993, "end_char_idx": 41074, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f65e451d-bfbb-4d35-9a0b-5b9cccba1f0d": {"__data__": {"id_": "f65e451d-bfbb-4d35-9a0b-5b9cccba1f0d", "embedding": null, "metadata": {"window": "H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. ", "original_text": "An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "14a323dc-0a44-44be-8bc9-98a41d5b6a7f", "node_type": "1", "metadata": {"window": "The entropy of the\naverage probability vector can be used as a measure of predictive uncertainty:\nH(p) = p\u22a4 log(p) (3.1)\nIt can be shown that the entropy is upper bounder by the cardinality of the random variable,\n21\n\ni.e. H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. 
The highest the score, the highest is the expected value of the sample. ", "original_text": "This shows that mutual information is higher for samples with high disagreement.\n"}, "hash": "8bcbe11cad3df833bf7cd167edf3fc663502ce46f0bc303bea013de23e67a4f0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "73f090ec-beef-4045-a6a2-9d91019279ab", "node_type": "1", "metadata": {"window": "The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. ", "original_text": "In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. "}, "hash": "3e134343c3187fd488682f2826d92aab14827dbf9793a28145f140ec2cd20b48", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n", "mimetype": "text/plain", "start_char_idx": 41074, "end_char_idx": 41258, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "73f090ec-beef-4045-a6a2-9d91019279ab": {"__data__": {"id_": "73f090ec-beef-4045-a6a2-9d91019279ab", "embedding": null, "metadata": {"window": "The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. ", "original_text": "In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f65e451d-bfbb-4d35-9a0b-5b9cccba1f0d", "node_type": "1", "metadata": {"window": "H(p) \u2264 log(|K|). The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. ", "original_text": "An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n"}, "hash": "ff953b96b1d7e297fa17be8588ef14a10706ace9bff8ffa1993b5c774be00ebf", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "60a03d25-6aff-4e41-bc3c-61ecacf1287d", "node_type": "1", "metadata": {"window": "However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. ", "original_text": "These functions are used to evaluate the unlabeled pool of\nsamples. "}, "hash": "ebd240acebe334d62ab28a4c938c6957ae70ff1393964915bd3e7631a490b063", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. 
", "mimetype": "text/plain", "start_char_idx": 41258, "end_char_idx": 41390, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "60a03d25-6aff-4e41-bc3c-61ecacf1287d": {"__data__": {"id_": "60a03d25-6aff-4e41-bc3c-61ecacf1287d", "embedding": null, "metadata": {"window": "However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. ", "original_text": "These functions are used to evaluate the unlabeled pool of\nsamples. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "73f090ec-beef-4045-a6a2-9d91019279ab", "node_type": "1", "metadata": {"window": "The mutual information is defined as:\nJ(p) = H(p) \u2212 1\nE \u2211\ne\u2208E\nH(p(e)) (3.2)\nSince entropy is always positive, the maximum possible value forJ(p) is H(p). However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. ", "original_text": "In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. 
"}, "hash": "3e134343c3187fd488682f2826d92aab14827dbf9793a28145f140ec2cd20b48", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "85814af2-e9f6-468f-a04b-5c9383455f9b", "node_type": "1", "metadata": {"window": "This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. ", "original_text": "The highest the score, the highest is the expected value of the sample. "}, "hash": "47bddb3f87d4d336e4eecf998f16619b4744077370008a8dd16cb176f84dbe93", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "These functions are used to evaluate the unlabeled pool of\nsamples. ", "mimetype": "text/plain", "start_char_idx": 41390, "end_char_idx": 41458, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "85814af2-e9f6-468f-a04b-5c9383455f9b": {"__data__": {"id_": "85814af2-e9f6-468f-a04b-5c9383455f9b", "embedding": null, "metadata": {"window": "This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. ", "original_text": "The highest the score, the highest is the expected value of the sample. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "60a03d25-6aff-4e41-bc3c-61ecacf1287d", "node_type": "1", "metadata": {"window": "However, when\nthe models make similar predictions, 1\nE \u2211e\u2208E H(p(e)) \u2192 H(p), thus J(p) \u2192 0, which is its mini-\nmum value. 
This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. ", "original_text": "These functions are used to evaluate the unlabeled pool of\nsamples. "}, "hash": "ebd240acebe334d62ab28a4c938c6957ae70ff1393964915bd3e7631a490b063", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e532e794-2d4e-4e9d-a45a-5283435cbe30", "node_type": "1", "metadata": {"window": "An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. ", "original_text": "Other common\nscoring methods are least confidence, margin sampling and variation ratios. "}, "hash": "d7023b56bb118dbacd284b8c2167808cbb7341673ea5592320e42361293076ee", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The highest the score, the highest is the expected value of the sample. ", "mimetype": "text/plain", "start_char_idx": 41458, "end_char_idx": 41530, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e532e794-2d4e-4e9d-a45a-5283435cbe30": {"__data__": {"id_": "e532e794-2d4e-4e9d-a45a-5283435cbe30", "embedding": null, "metadata": {"window": "An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. 
Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. ", "original_text": "Other common\nscoring methods are least confidence, margin sampling and variation ratios. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "85814af2-e9f6-468f-a04b-5c9383455f9b", "node_type": "1", "metadata": {"window": "This shows that mutual information is higher for samples with high disagreement.\n An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. ", "original_text": "The highest the score, the highest is the expected value of the sample. "}, "hash": "47bddb3f87d4d336e4eecf998f16619b4744077370008a8dd16cb176f84dbe93", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bce3834a-780e-4202-824d-144f2398c763", "node_type": "1", "metadata": {"window": "In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n", "original_text": "Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. 
"}, "hash": "17022b158c9a5471199377c2df03b392fbd069781d0605b178b72961118e90ac", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Other common\nscoring methods are least confidence, margin sampling and variation ratios. ", "mimetype": "text/plain", "start_char_idx": 41530, "end_char_idx": 41619, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bce3834a-780e-4202-824d-144f2398c763": {"__data__": {"id_": "bce3834a-780e-4202-824d-144f2398c763", "embedding": null, "metadata": {"window": "In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n", "original_text": "Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e532e794-2d4e-4e9d-a45a-5283435cbe30", "node_type": "1", "metadata": {"window": "An alternative way to look at the formula is that from the predictive uncertainty, we subtract\naway the expected aleatoric uncertainty, leaving a measure of the epistemic uncertainty.\n In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. ", "original_text": "Other common\nscoring methods are least confidence, margin sampling and variation ratios. 
"}, "hash": "d7023b56bb118dbacd284b8c2167808cbb7341673ea5592320e42361293076ee", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "733521fb-4cdb-4355-8db3-077f451b6a1e", "node_type": "1", "metadata": {"window": "These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n", "original_text": "Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. "}, "hash": "97bed994b92ffb0145e9bf5872019b0b84c2ce201b06f90493c4fbb629946a9a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. ", "mimetype": "text/plain", "start_char_idx": 41619, "end_char_idx": 41719, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "733521fb-4cdb-4355-8db3-077f451b6a1e": {"__data__": {"id_": "733521fb-4cdb-4355-8db3-077f451b6a1e", "embedding": null, "metadata": {"window": "These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n", "original_text": "Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bce3834a-780e-4202-824d-144f2398c763", "node_type": "1", "metadata": {"window": "In the context of active learning, entropy and mutual information are popular scoring functions,\nalso called acquisition functions. These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n", "original_text": "Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. "}, "hash": "17022b158c9a5471199377c2df03b392fbd069781d0605b178b72961118e90ac", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3792bbe8-915e-4b73-b991-62852b05a285", "node_type": "1", "metadata": {"window": "The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. ", "original_text": "These two scoring functions are conceptually similar to the entropy. 
"}, "hash": "ccdc2a61fe4d017dfbba0815948132108408863b47fff42bed18a7a83668d112", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. ", "mimetype": "text/plain", "start_char_idx": 41719, "end_char_idx": 41836, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3792bbe8-915e-4b73-b991-62852b05a285": {"__data__": {"id_": "3792bbe8-915e-4b73-b991-62852b05a285", "embedding": null, "metadata": {"window": "The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. ", "original_text": "These two scoring functions are conceptually similar to the entropy. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "733521fb-4cdb-4355-8db3-077f451b6a1e", "node_type": "1", "metadata": {"window": "These functions are used to evaluate the unlabeled pool of\nsamples. The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. 
It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n", "original_text": "Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. "}, "hash": "97bed994b92ffb0145e9bf5872019b0b84c2ce201b06f90493c4fbb629946a9a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c07f7f64-1137-4da6-b539-a0fba5a7e33b", "node_type": "1", "metadata": {"window": "Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). ", "original_text": "In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. "}, "hash": "afc866d7424d4eeeee21964fd2e22614d80519d3c313032ec44a8bd1788c97be", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "These two scoring functions are conceptually similar to the entropy. ", "mimetype": "text/plain", "start_char_idx": 41836, "end_char_idx": 41905, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c07f7f64-1137-4da6-b539-a0fba5a7e33b": {"__data__": {"id_": "c07f7f64-1137-4da6-b539-a0fba5a7e33b", "embedding": null, "metadata": {"window": "Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. 
In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). ", "original_text": "In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3792bbe8-915e-4b73-b991-62852b05a285", "node_type": "1", "metadata": {"window": "The highest the score, the highest is the expected value of the sample. Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. ", "original_text": "These two scoring functions are conceptually similar to the entropy. 
"}, "hash": "ccdc2a61fe4d017dfbba0815948132108408863b47fff42bed18a7a83668d112", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "375a0faf-c84b-41f9-aeed-5e7aaec40b75", "node_type": "1", "metadata": {"window": "Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. ", "original_text": "It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n"}, "hash": "be9bcc74efc5336cae1aa6b45c958baf7255e151019c4101446e104375c5adf7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. ", "mimetype": "text/plain", "start_char_idx": 41905, "end_char_idx": 42113, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "375a0faf-c84b-41f9-aeed-5e7aaec40b75": {"__data__": {"id_": "375a0faf-c84b-41f9-aeed-5e7aaec40b75", "embedding": null, "metadata": {"window": "Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. 
It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. ", "original_text": "It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c07f7f64-1137-4da6-b539-a0fba5a7e33b", "node_type": "1", "metadata": {"window": "Other common\nscoring methods are least confidence, margin sampling and variation ratios. Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). 
", "original_text": "In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. "}, "hash": "afc866d7424d4eeeee21964fd2e22614d80519d3c313032ec44a8bd1788c97be", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ce787730-d0c5-48b2-ad6a-569a719433be", "node_type": "1", "metadata": {"window": "Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. ", "original_text": "All the implemented scoring functions are summarized in Table 3.1.\n"}, "hash": "72cb851aa7398ed9093652d335095aacc8e443c1c19436bc66d8bae99dd8ebe3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n", "mimetype": "text/plain", "start_char_idx": 42113, "end_char_idx": 42307, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ce787730-d0c5-48b2-ad6a-569a719433be": {"__data__": {"id_": "ce787730-d0c5-48b2-ad6a-569a719433be", "embedding": null, "metadata": {"window": "Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. 
It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. ", "original_text": "All the implemented scoring functions are summarized in Table 3.1.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "375a0faf-c84b-41f9-aeed-5e7aaec40b75", "node_type": "1", "metadata": {"window": "Least confidence is\nto select the sample with the smallest probability of the top1 predicted class. Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. 
", "original_text": "It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n"}, "hash": "be9bcc74efc5336cae1aa6b45c958baf7255e151019c4101446e104375c5adf7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "de251c54-1177-42fc-bd1d-7a80c0d74fcd", "node_type": "1", "metadata": {"window": "These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. ", "original_text": "Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. "}, "hash": "e519bc989a16ec94ab71771beb8f85b4a9d1fd8e3d77adb3a3727359929579d7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "All the implemented scoring functions are summarized in Table 3.1.\n", "mimetype": "text/plain", "start_char_idx": 42307, "end_char_idx": 42374, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "de251c54-1177-42fc-bd1d-7a80c0d74fcd": {"__data__": {"id_": "de251c54-1177-42fc-bd1d-7a80c0d74fcd", "embedding": null, "metadata": {"window": "These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. 
It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. ", "original_text": "Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ce787730-d0c5-48b2-ad6a-569a719433be", "node_type": "1", "metadata": {"window": "Margin sampling\nis to calculate the difference between the probabilities of the top-1 and the top-2 predicted\nclass. These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. 
E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. ", "original_text": "All the implemented scoring functions are summarized in Table 3.1.\n"}, "hash": "72cb851aa7398ed9093652d335095aacc8e443c1c19436bc66d8bae99dd8ebe3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "35d63551-ba33-4ebb-aec0-49f214ae7e66", "node_type": "1", "metadata": {"window": "In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. ", "original_text": "E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). "}, "hash": "9a55d30ea51897c8b9d65cd0da1c9e6f9881b7e45623a5a682b384ac9ae00703", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. 
", "mimetype": "text/plain", "start_char_idx": 42374, "end_char_idx": 42734, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "35d63551-ba33-4ebb-aec0-49f214ae7e66": {"__data__": {"id_": "35d63551-ba33-4ebb-aec0-49f214ae7e66", "embedding": null, "metadata": {"window": "In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. ", "original_text": "E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "de251c54-1177-42fc-bd1d-7a80c0d74fcd", "node_type": "1", "metadata": {"window": "These two scoring functions are conceptually similar to the entropy. In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. 
It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. ", "original_text": "Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. "}, "hash": "e519bc989a16ec94ab71771beb8f85b4a9d1fd8e3d77adb3a3727359929579d7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "42da53ec-3b5d-4bf6-98d0-ef789de5cb84", "node_type": "1", "metadata": {"window": "It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. 
In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. ", "original_text": "As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. "}, "hash": "c1bae9c88ba5d1d7c0c2903ae65f1b0fa56389b5c737a0477d372edd359d8bdb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). ", "mimetype": "text/plain", "start_char_idx": 42734, "end_char_idx": 42993, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "42da53ec-3b5d-4bf6-98d0-ef789de5cb84": {"__data__": {"id_": "42da53ec-3b5d-4bf6-98d0-ef789de5cb84", "embedding": null, "metadata": {"window": "It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. ", "original_text": "As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "35d63551-ba33-4ebb-aec0-49f214ae7e66", "node_type": "1", "metadata": {"window": "In contrast, similar\nto mutual entropy, variation ratios also looks for disagreement between the models, and it is\ndefined as the fraction of members in the ensemble that do not agree with the majority vote. 
It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. ", "original_text": "E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). "}, "hash": "9a55d30ea51897c8b9d65cd0da1c9e6f9881b7e45623a5a682b384ac9ae00703", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "47dbb703-fd1a-41ad-872a-56cdcc4be76e", "node_type": "1", "metadata": {"window": "All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. 
", "original_text": "A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. "}, "hash": "af019319cbf395818b8f445a2a7e5e52f340ede19662354817252897458f097c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. ", "mimetype": "text/plain", "start_char_idx": 42993, "end_char_idx": 43142, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "47dbb703-fd1a-41ad-872a-56cdcc4be76e": {"__data__": {"id_": "47dbb703-fd1a-41ad-872a-56cdcc4be76e", "embedding": null, "metadata": {"window": "All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. ", "original_text": "A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "42da53ec-3b5d-4bf6-98d0-ef789de5cb84", "node_type": "1", "metadata": {"window": "It is\nworth noting that this function has the undesirable property of only returning a finite number\nof values, related to the number of models in the ensemble|E| and the number of classes |K|.\n All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. ", "original_text": "As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. "}, "hash": "c1bae9c88ba5d1d7c0c2903ae65f1b0fa56389b5c737a0477d372edd359d8bdb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "241a2d14-fa78-4dea-86e6-1bb8df72785e", "node_type": "1", "metadata": {"window": "Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. 
it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. ", "original_text": "However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. "}, "hash": "5ea68e49b5708dd3a9d60b6a6fbe61ef7e818641062d5c845f0e5f0177208633", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. ", "mimetype": "text/plain", "start_char_idx": 43142, "end_char_idx": 43281, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "241a2d14-fa78-4dea-86e6-1bb8df72785e": {"__data__": {"id_": "241a2d14-fa78-4dea-86e6-1bb8df72785e", "embedding": null, "metadata": {"window": "Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. ", "original_text": "However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "47dbb703-fd1a-41ad-872a-56cdcc4be76e", "node_type": "1", "metadata": {"window": "All the implemented scoring functions are summarized in Table 3.1.\n Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. ", "original_text": "A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. "}, "hash": "af019319cbf395818b8f445a2a7e5e52f340ede19662354817252897458f097c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "72caf107-b306-44d8-9ae7-5e1e1bc21ba5", "node_type": "1", "metadata": {"window": "E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. 
While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. ", "original_text": "it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. "}, "hash": "44ea2cc57c410ddaa5dbd487f7da38a5d7c5649e91e380fafd1b385f0f425180", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. ", "mimetype": "text/plain", "start_char_idx": 43281, "end_char_idx": 43361, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "72caf107-b306-44d8-9ae7-5e1e1bc21ba5": {"__data__": {"id_": "72caf107-b306-44d8-9ae7-5e1e1bc21ba5", "embedding": null, "metadata": {"window": "E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. ", "original_text": "it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "241a2d14-fa78-4dea-86e6-1bb8df72785e", "node_type": "1", "metadata": {"window": "Table 3.1: Summary of typical scoring functions in uncertainty-based active learning\nLeast confidence LC(p) = top1k\u2208 K(1 \u2212 pk)\nMargin sampling MS(p) =1 \u2212 (top1k\u2208K(pk) \u2212 top2i\u2208K(pk))\nEntropy H(p) = p\u22a4 log(p)\nMutual information J(p) = H(p) \u2212 1\nE \u2211e\u2208E H(p(e))\nVariation ratios V(p) =1 \u2212 1\nE \u2211e\u2208E (arg maxk\u2208K p(e)\nk = M)\nwhere M = modee\u2208E(arg maxk\u2208K p(e)\nk )\n3.2. 
E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. ", "original_text": "However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. "}, "hash": "5ea68e49b5708dd3a9d60b6a6fbe61ef7e818641062d5c845f0e5f0177208633", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1351b7b7-a4e7-4d81-a70c-cf97e97973d3", "node_type": "1", "metadata": {"window": "As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n", "original_text": "In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. "}, "hash": "12cc4ecb1fcebff01565b4dcc8455fa39c0f4a7faaa249d91cb2bdcbb583be66", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. ", "mimetype": "text/plain", "start_char_idx": 43361, "end_char_idx": 43478, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1351b7b7-a4e7-4d81-a70c-cf97e97973d3": {"__data__": {"id_": "1351b7b7-a4e7-4d81-a70c-cf97e97973d3", "embedding": null, "metadata": {"window": "As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. 
A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n", "original_text": "In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "72caf107-b306-44d8-9ae7-5e1e1bc21ba5", "node_type": "1", "metadata": {"window": "E NSEMBLE CONFIGURATIONS\nAs mentioned above, ensembles are commonly obtained by running multiple trainings (with\ndifferent random seeds), usually in the 5-10 range [32] (although theoretically, the larger the\nensemble, the better the uncertainty estimation). As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. ", "original_text": "it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. "}, "hash": "44ea2cc57c410ddaa5dbd487f7da38a5d7c5649e91e380fafd1b385f0f425180", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bf18eac3-1a40-4459-bad0-ddd3e216a85a", "node_type": "1", "metadata": {"window": "A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. 
In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n", "original_text": "An interesting approach to derive en-\nsembles with no extra cost is given in [36]. "}, "hash": "333522c3918422b002ad20289fc921f332c8bb209f1b6185620aa9e8cdf9c36d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. ", "mimetype": "text/plain", "start_char_idx": 43478, "end_char_idx": 43564, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bf18eac3-1a40-4459-bad0-ddd3e216a85a": {"__data__": {"id_": "bf18eac3-1a40-4459-bad0-ddd3e216a85a", "embedding": null, "metadata": {"window": "A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n", "original_text": "An interesting approach to derive en-\nsembles with no extra cost is given in [36]. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1351b7b7-a4e7-4d81-a70c-cf97e97973d3", "node_type": "1", "metadata": {"window": "As the additional computational cost can be a\nlimitation, many techniques have been proposed to derive ensembles without additional train-\ning runs. A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. 
it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n", "original_text": "In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. "}, "hash": "12cc4ecb1fcebff01565b4dcc8455fa39c0f4a7faaa249d91cb2bdcbb583be66", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "384d8554-a949-4192-aed1-83d9b0ed4b2a", "node_type": "1", "metadata": {"window": "However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. ", "original_text": "In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. "}, "hash": "415472b29ffd31b3ce9212114d37e2557e32cfbaab38eddd2b9faa33cd9c50b1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "An interesting approach to derive en-\nsembles with no extra cost is given in [36]. ", "mimetype": "text/plain", "start_char_idx": 43564, "end_char_idx": 43647, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "384d8554-a949-4192-aed1-83d9b0ed4b2a": {"__data__": {"id_": "384d8554-a949-4192-aed1-83d9b0ed4b2a", "embedding": null, "metadata": {"window": "However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. 
While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. ", "original_text": "In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bf18eac3-1a40-4459-bad0-ddd3e216a85a", "node_type": "1", "metadata": {"window": "A well known example is Monte-Carlo drop-out [33], where drop-out is applied at\ntest-time and multiple inferences are run for each sample. However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n", "original_text": "An interesting approach to derive en-\nsembles with no extra cost is given in [36]. "}, "hash": "333522c3918422b002ad20289fc921f332c8bb209f1b6185620aa9e8cdf9c36d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "778a92e3-c67a-4c2f-899a-284823717b0a", "node_type": "1", "metadata": {"window": "it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. 
The best\ncheckpoint is used for each run.\n", "original_text": "While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. "}, "hash": "7a6dbe2d7e123fdc58257a436486c628dd0b2f621adc6fac59e5c9e55145ce04", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. ", "mimetype": "text/plain", "start_char_idx": 43647, "end_char_idx": 43860, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "778a92e3-c67a-4c2f-899a-284823717b0a": {"__data__": {"id_": "778a92e3-c67a-4c2f-899a-284823717b0a", "embedding": null, "metadata": {"window": "it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n", "original_text": "While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "384d8554-a949-4192-aed1-83d9b0ed4b2a", "node_type": "1", "metadata": {"window": "However, it has been shown that\n22\n\nMC-dropout suffers from mode collapse, i.e. it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. 
The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. ", "original_text": "In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. "}, "hash": "415472b29ffd31b3ce9212114d37e2557e32cfbaab38eddd2b9faa33cd9c50b1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9dfa46b1-ac55-4438-b46c-f0e6e4f3f8f1", "node_type": "1", "metadata": {"window": "In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n", "original_text": "The configurations are\nsummarized in Table 3.2.\n"}, "hash": "25dd1ff4e5c77ed0bb852f4a3d86896f72de38261feba601c467af39819c41df", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. ", "mimetype": "text/plain", "start_char_idx": 43860, "end_char_idx": 44049, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9dfa46b1-ac55-4438-b46c-f0e6e4f3f8f1": {"__data__": {"id_": "9dfa46b1-ac55-4438-b46c-f0e6e4f3f8f1", "embedding": null, "metadata": {"window": "In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. 
The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n", "original_text": "The configurations are\nsummarized in Table 3.2.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "778a92e3-c67a-4c2f-899a-284823717b0a", "node_type": "1", "metadata": {"window": "it can lead to a very imbalanced dataset by fa-\nvoring a specific class during the active learning process [34, 35]. In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n", "original_text": "While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. "}, "hash": "7a6dbe2d7e123fdc58257a436486c628dd0b2f621adc6fac59e5c9e55145ce04", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2f1d1648-bc76-4143-a663-c5db3f03a01f", "node_type": "1", "metadata": {"window": "An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . 
", "original_text": "Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n"}, "hash": "173cfdd1358fc5015c1f3a39e380403e186a299ec6fe1884d72f073bcc0948ba", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The configurations are\nsummarized in Table 3.2.\n", "mimetype": "text/plain", "start_char_idx": 44049, "end_char_idx": 44097, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2f1d1648-bc76-4143-a663-c5db3f03a01f": {"__data__": {"id_": "2f1d1648-bc76-4143-a663-c5db3f03a01f", "embedding": null, "metadata": {"window": "An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . ", "original_text": "Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9dfa46b1-ac55-4438-b46c-f0e6e4f3f8f1", "node_type": "1", "metadata": {"window": "In contrast, in the reported\nstudies, ensembles are able to counteract the imbalance. An interesting approach to derive en-\nsembles with no extra cost is given in [36]. In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. 
The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n", "original_text": "The configurations are\nsummarized in Table 3.2.\n"}, "hash": "25dd1ff4e5c77ed0bb852f4a3d86896f72de38261feba601c467af39819c41df", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "37931bc3-97c6-4a26-bc5c-300b56bc7ba8", "node_type": "1", "metadata": {"window": "In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . ", "original_text": "5best An ensemble of five models, each from a different training run. "}, "hash": "b0fcfca6fb3893c6cb6e491d8d6bc11fd4e90db8a3ef562a45369f29842e2dce", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n", "mimetype": "text/plain", "start_char_idx": 44097, "end_char_idx": 44220, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "37931bc3-97c6-4a26-bc5c-300b56bc7ba8": {"__data__": {"id_": "37931bc3-97c6-4a26-bc5c-300b56bc7ba8", "embedding": null, "metadata": {"window": "In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . ", "original_text": "5best An ensemble of five models, each from a different training run. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2f1d1648-bc76-4143-a663-c5db3f03a01f", "node_type": "1", "metadata": {"window": "An interesting approach to derive en-\nsembles with no extra cost is given in [36]. 
In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . ", "original_text": "Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n"}, "hash": "173cfdd1358fc5015c1f3a39e380403e186a299ec6fe1884d72f073bcc0948ba", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "29e6dd38-d85e-407d-ab21-1bf999ba448c", "node_type": "1", "metadata": {"window": "While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. ", "original_text": "The best\ncheckpoint is used for each run.\n"}, "hash": "bac530a48ae7c2b761dbb50a460dbb631fd434b245f672f416b5319df37ee6ad", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "5best An ensemble of five models, each from a different training run. ", "mimetype": "text/plain", "start_char_idx": 44220, "end_char_idx": 44290, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "29e6dd38-d85e-407d-ab21-1bf999ba448c": {"__data__": {"id_": "29e6dd38-d85e-407d-ab21-1bf999ba448c", "embedding": null, "metadata": {"window": "While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. 
", "original_text": "The best\ncheckpoint is used for each run.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "37931bc3-97c6-4a26-bc5c-300b56bc7ba8", "node_type": "1", "metadata": {"window": "In this work, the disagreement between checkpoints\nstored during successive training epochs (due to the catastrophic forgetting property of DNNs)\nis exploited to efficiently construct large and diverse ensembles. While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . ", "original_text": "5best An ensemble of five models, each from a different training run. "}, "hash": "b0fcfca6fb3893c6cb6e491d8d6bc11fd4e90db8a3ef562a45369f29842e2dce", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a65c47d7-65b1-4b2b-b47d-6b0d8b119e84", "node_type": "1", "metadata": {"window": "The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n", "original_text": "20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n"}, "hash": "b479a17c0004465fccc1e1b72a8ce6a564ec2247f0dbd780cf1246611fef28b3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The best\ncheckpoint is used for each run.\n", "mimetype": "text/plain", "start_char_idx": 44290, "end_char_idx": 44332, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a65c47d7-65b1-4b2b-b47d-6b0d8b119e84": {"__data__": {"id_": "a65c47d7-65b1-4b2b-b47d-6b0d8b119e84", "embedding": null, "metadata": {"window": "The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. 
This is a subset of 20last.\n", "original_text": "20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "29e6dd38-d85e-407d-ab21-1bf999ba448c", "node_type": "1", "metadata": {"window": "While the high computational\ncost associated to typical ensembles doesn\u2019t seem to be an issue at MILA, the encouraging\nresults obtained in [36] motivate exploring different configurations. The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. ", "original_text": "The best\ncheckpoint is used for each run.\n"}, "hash": "bac530a48ae7c2b761dbb50a460dbb631fd434b245f672f416b5319df37ee6ad", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5e100849-6a24-4de3-b941-2efb8a7e1dcd", "node_type": "1", "metadata": {"window": "Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. ", "original_text": "5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . "}, "hash": "9bf3a33d687c69c71a7640797485ae5fa356561977cb6bb05a32409b3dbb977f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n", "mimetype": "text/plain", "start_char_idx": 44332, "end_char_idx": 44419, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5e100849-6a24-4de3-b941-2efb8a7e1dcd": {"__data__": {"id_": "5e100849-6a24-4de3-b941-2efb8a7e1dcd", "embedding": null, "metadata": {"window": "Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. ", "original_text": "5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a65c47d7-65b1-4b2b-b47d-6b0d8b119e84", "node_type": "1", "metadata": {"window": "The configurations are\nsummarized in Table 3.2.\n Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n", "original_text": "20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n"}, "hash": "b479a17c0004465fccc1e1b72a8ce6a564ec2247f0dbd780cf1246611fef28b3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b7cdf619-ac55-439f-a1f5-4d8f5d82f99a", "node_type": "1", "metadata": {"window": "5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. ", "original_text": ". "}, "hash": "d1a678036b85bada66320879a39074bfc1556e03689b8e72379244ff99a4909d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . ", "mimetype": "text/plain", "start_char_idx": 44419, "end_char_idx": 44486, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b7cdf619-ac55-439f-a1f5-4d8f5d82f99a": {"__data__": {"id_": "b7cdf619-ac55-439f-a1f5-4d8f5d82f99a", "embedding": null, "metadata": {"window": "5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. ", "original_text": ". 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5e100849-6a24-4de3-b941-2efb8a7e1dcd", "node_type": "1", "metadata": {"window": "Table 3.2: Summary of the ensemble configurations considered in this study\nName Description\nsingle A single model is used.\n 5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. ", "original_text": "5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . "}, "hash": "9bf3a33d687c69c71a7640797485ae5fa356561977cb6bb05a32409b3dbb977f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b5f742d0-3a6f-4fb3-98be-c8b77f829063", "node_type": "1", "metadata": {"window": "The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. ", "original_text": ".N \u2212 20 of a single\ntraining run, where N is the last epoch. "}, "hash": "2d907252f0adcb5cc48abed0a1fde2ee38f303bf423c4285dbbcb7507717874c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ". ", "mimetype": "text/plain", "start_char_idx": 227, "end_char_idx": 229, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b5f742d0-3a6f-4fb3-98be-c8b77f829063": {"__data__": {"id_": "b5f742d0-3a6f-4fb3-98be-c8b77f829063", "embedding": null, "metadata": {"window": "The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. ", "original_text": ".N \u2212 20 of a single\ntraining run, where N is the last epoch. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b7cdf619-ac55-439f-a1f5-4d8f5d82f99a", "node_type": "1", "metadata": {"window": "5best An ensemble of five models, each from a different training run. The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. ", "original_text": ". "}, "hash": "d1a678036b85bada66320879a39074bfc1556e03689b8e72379244ff99a4909d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9a7481be-eba8-48ad-9c86-ed4e3bdd1e64", "node_type": "1", "metadata": {"window": "20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. ", "original_text": "This is a subset of 20last.\n"}, "hash": "a0cda1fa5a65dd885745eaa2c9d683bc98ea2aa65607779a0d449bc16aaa4574", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": ".N \u2212 20 of a single\ntraining run, where N is the last epoch. ", "mimetype": "text/plain", "start_char_idx": 44488, "end_char_idx": 44549, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9a7481be-eba8-48ad-9c86-ed4e3bdd1e64": {"__data__": {"id_": "9a7481be-eba8-48ad-9c86-ed4e3bdd1e64", "embedding": null, "metadata": {"window": "20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. 
", "original_text": "This is a subset of 20last.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b5f742d0-3a6f-4fb3-98be-c8b77f829063", "node_type": "1", "metadata": {"window": "The best\ncheckpoint is used for each run.\n 20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. ", "original_text": ".N \u2212 20 of a single\ntraining run, where N is the last epoch. "}, "hash": "2d907252f0adcb5cc48abed0a1fde2ee38f303bf423c4285dbbcb7507717874c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4d58f025-3af0-4221-a8c5-43da05af3b58", "node_type": "1", "metadata": {"window": "5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. ", "original_text": "3.3. "}, "hash": "f87004375ec7b1027db4adb911d1eac58857329a159dd5562dd5e69ec0831a53", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "This is a subset of 20last.\n", "mimetype": "text/plain", "start_char_idx": 44549, "end_char_idx": 44577, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4d58f025-3af0-4221-a8c5-43da05af3b58": {"__data__": {"id_": "4d58f025-3af0-4221-a8c5-43da05af3b58", "embedding": null, "metadata": {"window": "5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. 
The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. ", "original_text": "3.3. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9a7481be-eba8-48ad-9c86-ed4e3bdd1e64", "node_type": "1", "metadata": {"window": "20last An ensemble of twenty models, obtained at the 20 last epochs of a training run.\n 5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. ", "original_text": "This is a subset of 20last.\n"}, "hash": "a0cda1fa5a65dd885745eaa2c9d683bc98ea2aa65607779a0d449bc16aaa4574", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fca08b82-c7d6-4d24-98f0-8411111f33b1", "node_type": "1", "metadata": {"window": ". .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. ", "original_text": "M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. "}, "hash": "5960d3b66a8f3fd418904aae9406cda565cbcb6100762edf64274dbeee427d06", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "3.3. ", "mimetype": "text/plain", "start_char_idx": 44577, "end_char_idx": 44582, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fca08b82-c7d6-4d24-98f0-8411111f33b1": {"__data__": {"id_": "fca08b82-c7d6-4d24-98f0-8411111f33b1", "embedding": null, "metadata": {"window": ". .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. 
M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. ", "original_text": "M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4d58f025-3af0-4221-a8c5-43da05af3b58", "node_type": "1", "metadata": {"window": "5ckpt An ensemble of five models, obtained from epochs N, N \u2212 5, . . .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. ", "original_text": "3.3. "}, "hash": "f87004375ec7b1027db4adb911d1eac58857329a159dd5562dd5e69ec0831a53", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fbd4ea09-d525-434a-be61-7773af642d22", "node_type": "1", "metadata": {"window": ".N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. 
", "original_text": "Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. "}, "hash": "dc5b40a5b4be7f09036d3eba5024dd30fbc213e84d960517098443a0c8e53556", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. ", "mimetype": "text/plain", "start_char_idx": 44582, "end_char_idx": 44820, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fbd4ea09-d525-434a-be61-7773af642d22": {"__data__": {"id_": "fbd4ea09-d525-434a-be61-7773af642d22", "embedding": null, "metadata": {"window": ".N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. ", "original_text": "Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fca08b82-c7d6-4d24-98f0-8411111f33b1", "node_type": "1", "metadata": {"window": ". .N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. 
", "original_text": "M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. "}, "hash": "5960d3b66a8f3fd418904aae9406cda565cbcb6100762edf64274dbeee427d06", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fadcd762-e251-4528-b598-7c4d8983ed21", "node_type": "1", "metadata": {"window": "This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. ", "original_text": "The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. "}, "hash": "0fad268e879943e882c433b6c6a0a709c9250b0c38eb9b9edcb2efbdfe2a9b7e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. ", "mimetype": "text/plain", "start_char_idx": 44820, "end_char_idx": 44962, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fadcd762-e251-4528-b598-7c4d8983ed21": {"__data__": {"id_": "fadcd762-e251-4528-b598-7c4d8983ed21", "embedding": null, "metadata": {"window": "This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. ", "original_text": "The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fbd4ea09-d525-434a-be61-7773af642d22", "node_type": "1", "metadata": {"window": ".N \u2212 20 of a single\ntraining run, where N is the last epoch. This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. ", "original_text": "Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. "}, "hash": "dc5b40a5b4be7f09036d3eba5024dd30fbc213e84d960517098443a0c8e53556", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "71960f4d-b5dc-4073-9207-da469975e416", "node_type": "1", "metadata": {"window": "3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. ", "original_text": "A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. "}, "hash": "0f02d3c3110dbdb8d9c26cc9aaf2209f763e01147a1e4111636886c533ec2488", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. 
", "mimetype": "text/plain", "start_char_idx": 44962, "end_char_idx": 45091, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "71960f4d-b5dc-4073-9207-da469975e416": {"__data__": {"id_": "71960f4d-b5dc-4073-9207-da469975e416", "embedding": null, "metadata": {"window": "3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. ", "original_text": "A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fadcd762-e251-4528-b598-7c4d8983ed21", "node_type": "1", "metadata": {"window": "This is a subset of 20last.\n 3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. ", "original_text": "The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. 
"}, "hash": "0fad268e879943e882c433b6c6a0a709c9250b0c38eb9b9edcb2efbdfe2a9b7e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f35544ea-5ea0-4105-b9f7-2ec2dad46bdb", "node_type": "1", "metadata": {"window": "M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n", "original_text": "The performance measured\nat each iteration is usually plotted as a function of the number of training samples. "}, "hash": "13bc99e89878fea32090d288e1cd7b0747c69f9e890bb7a895df42d87819c38a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. ", "mimetype": "text/plain", "start_char_idx": 45091, "end_char_idx": 45226, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f35544ea-5ea0-4105-b9f7-2ec2dad46bdb": {"__data__": {"id_": "f35544ea-5ea0-4105-b9f7-2ec2dad46bdb", "embedding": null, "metadata": {"window": "M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. 
The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n", "original_text": "The performance measured\nat each iteration is usually plotted as a function of the number of training samples. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "71960f4d-b5dc-4073-9207-da469975e416", "node_type": "1", "metadata": {"window": "3.3. M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. ", "original_text": "A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. "}, "hash": "0f02d3c3110dbdb8d9c26cc9aaf2209f763e01147a1e4111636886c533ec2488", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c5a7c9e5-69ae-48dd-978f-e82b6defd827", "node_type": "1", "metadata": {"window": "Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. 
", "original_text": "Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. "}, "hash": "1f53884a1a46f76cffccf6192ebec3b43d5f631b626125d7d6ea60014478e146", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The performance measured\nat each iteration is usually plotted as a function of the number of training samples. ", "mimetype": "text/plain", "start_char_idx": 45226, "end_char_idx": 45337, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c5a7c9e5-69ae-48dd-978f-e82b6defd827": {"__data__": {"id_": "c5a7c9e5-69ae-48dd-978f-e82b6defd827", "embedding": null, "metadata": {"window": "Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. ", "original_text": "Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f35544ea-5ea0-4105-b9f7-2ec2dad46bdb", "node_type": "1", "metadata": {"window": "M ETHODS\nThe standard procedure to evaluate active learning techniques follows the typical AL work-\nflow, as described in Algorithm 1, with the difference that labels are usually already available\nfor all the data used in the experiment. Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. 
With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n", "original_text": "The performance measured\nat each iteration is usually plotted as a function of the number of training samples. "}, "hash": "13bc99e89878fea32090d288e1cd7b0747c69f9e890bb7a895df42d87819c38a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "34fcb400-073d-4b70-9fde-5fab73e9365e", "node_type": "1", "metadata": {"window": "The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. ", "original_text": "Random selection is used as a baseline. "}, "hash": "473ad021366158aa8a711e4501b9e4fefd20ccd30df5002f6d8713c68025786b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. ", "mimetype": "text/plain", "start_char_idx": 45337, "end_char_idx": 45452, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "34fcb400-073d-4b70-9fde-5fab73e9365e": {"__data__": {"id_": "34fcb400-073d-4b70-9fde-5fab73e9365e", "embedding": null, "metadata": {"window": "The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. 
The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. ", "original_text": "Random selection is used as a baseline. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c5a7c9e5-69ae-48dd-978f-e82b6defd827", "node_type": "1", "metadata": {"window": "Initially, the whole dataset is split into three: an initial\ntraining datasetL, a large pool of data U, and a left-out dataset T for testing. The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. ", "original_text": "Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. "}, "hash": "1f53884a1a46f76cffccf6192ebec3b43d5f631b626125d7d6ea60014478e146", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5a7c92e8-71cc-4887-9e9c-49d1a639b382", "node_type": "1", "metadata": {"window": "A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. 
However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". ", "original_text": "With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. "}, "hash": "7b191ef53d9a7d820598c55e541541a0a6f2d326a2d64bcd5f3cb1d47e41c91c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Random selection is used as a baseline. ", "mimetype": "text/plain", "start_char_idx": 45452, "end_char_idx": 45492, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5a7c92e8-71cc-4887-9e9c-49d1a639b382": {"__data__": {"id_": "5a7c92e8-71cc-4887-9e9c-49d1a639b382", "embedding": null, "metadata": {"window": "A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". ", "original_text": "With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "34fcb400-073d-4b70-9fde-5fab73e9365e", "node_type": "1", "metadata": {"window": "The model M\n(or the ensemble E) is trained on L, evaluated on T, and used to score the pool of data U with\nthe desired function. A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. 
Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. ", "original_text": "Random selection is used as a baseline. "}, "hash": "473ad021366158aa8a711e4501b9e4fefd20ccd30df5002f6d8713c68025786b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0b127134-e4d4-4c6c-a36f-85e8f0393045", "node_type": "1", "metadata": {"window": "The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. ", "original_text": "The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n"}, "hash": "65ac2946d23ad786f56772267b15d0e877504e6037b0967b47e3fa28b62dd544", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. 
", "mimetype": "text/plain", "start_char_idx": 45492, "end_char_idx": 45677, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0b127134-e4d4-4c6c-a36f-85e8f0393045": {"__data__": {"id_": "0b127134-e4d4-4c6c-a36f-85e8f0393045", "embedding": null, "metadata": {"window": "The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. ", "original_text": "The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5a7c92e8-71cc-4887-9e9c-49d1a639b382", "node_type": "1", "metadata": {"window": "A number of samples is selected from U accordingly, and added to L. M\n(or E) is retrained on the updated L, evaluated on T, and so on. The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. 
In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". ", "original_text": "With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. "}, "hash": "7b191ef53d9a7d820598c55e541541a0a6f2d326a2d64bcd5f3cb1d47e41c91c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a9134fcd-a411-4f98-9779-eee1e98b8b5b", "node_type": "1", "metadata": {"window": "Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. ", "original_text": "While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. "}, "hash": "63b05326c349eda0c862aff66e9b0d6bb6d7dfbe9f35e3cb47f2318cf9d50f5b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n", "mimetype": "text/plain", "start_char_idx": 45677, "end_char_idx": 45816, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a9134fcd-a411-4f98-9779-eee1e98b8b5b": {"__data__": {"id_": "a9134fcd-a411-4f98-9779-eee1e98b8b5b", "embedding": null, "metadata": {"window": "Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. 
The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. ", "original_text": "While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0b127134-e4d4-4c6c-a36f-85e8f0393045", "node_type": "1", "metadata": {"window": "The performance measured\nat each iteration is usually plotted as a function of the number of training samples. Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. 
", "original_text": "The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n"}, "hash": "65ac2946d23ad786f56772267b15d0e877504e6037b0967b47e3fa28b62dd544", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "22e15aeb-1c90-4bec-aba7-21d322c73146", "node_type": "1", "metadata": {"window": "Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. ", "original_text": "However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. "}, "hash": "abafd02db710e75453052af575218cc0e7fcdbc1c4c66d7edb3e0219ac0b72e1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. ", "mimetype": "text/plain", "start_char_idx": 45816, "end_char_idx": 46030, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "22e15aeb-1c90-4bec-aba7-21d322c73146": {"__data__": {"id_": "22e15aeb-1c90-4bec-aba7-21d322c73146", "embedding": null, "metadata": {"window": "Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. 
In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. ", "original_text": "However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a9134fcd-a411-4f98-9779-eee1e98b8b5b", "node_type": "1", "metadata": {"window": "Hence, the\nresult of the experiment is a plot with several curves, each corresponding to one acquisition\nfunction. Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. ", "original_text": "While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. 
"}, "hash": "63b05326c349eda0c862aff66e9b0d6bb6d7dfbe9f35e3cb47f2318cf9d50f5b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "77e32929-73fd-43fa-a6c4-d41df96f366c", "node_type": "1", "metadata": {"window": "With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. ", "original_text": "In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". "}, "hash": "f4b4c92ec76169ce5695dc7cc2daddb856125f094c2145d03f9d72c4268c1782", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. ", "mimetype": "text/plain", "start_char_idx": 46030, "end_char_idx": 46142, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "77e32929-73fd-43fa-a6c4-d41df96f366c": {"__data__": {"id_": "77e32929-73fd-43fa-a6c4-d41df96f366c", "embedding": null, "metadata": {"window": "With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. 
In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. ", "original_text": "In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "22e15aeb-1c90-4bec-aba7-21d322c73146", "node_type": "1", "metadata": {"window": "Random selection is used as a baseline. With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. ", "original_text": "However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. 
"}, "hash": "abafd02db710e75453052af575218cc0e7fcdbc1c4c66d7edb3e0219ac0b72e1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1fcc4b26-1815-499e-b8b9-2c7b5d53871d", "node_type": "1", "metadata": {"window": "The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. ", "original_text": "In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. "}, "hash": "59a2b046f9c1274ab927e6550066cbb485fe7495f07d4cd54be3d15413667e5b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". ", "mimetype": "text/plain", "start_char_idx": 46142, "end_char_idx": 46378, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1fcc4b26-1815-499e-b8b9-2c7b5d53871d": {"__data__": {"id_": "1fcc4b26-1815-499e-b8b9-2c7b5d53871d", "embedding": null, "metadata": {"window": "The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. 
In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. ", "original_text": "In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "77e32929-73fd-43fa-a6c4-d41df96f366c", "node_type": "1", "metadata": {"window": "With effective active learning techniques, the\nmodel is able to closely approach or even reach the performance of a model trained on the full\ndataset, with only a fraction of the data. The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. 
It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. ", "original_text": "In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". "}, "hash": "f4b4c92ec76169ce5695dc7cc2daddb856125f094c2145d03f9d72c4268c1782", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "04467916-4416-4ccf-b050-6170335a3157", "node_type": "1", "metadata": {"window": "While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n", "original_text": "At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. "}, "hash": "7ac3ffb7b7a1a01abfcebf60c7d833401c57d6d5accab763c4b7246a4ddce8f8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. ", "mimetype": "text/plain", "start_char_idx": 46378, "end_char_idx": 46707, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "04467916-4416-4ccf-b050-6170335a3157": {"__data__": {"id_": "04467916-4416-4ccf-b050-6170335a3157", "embedding": null, "metadata": {"window": "While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. 
In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n", "original_text": "At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1fcc4b26-1815-499e-b8b9-2c7b5d53871d", "node_type": "1", "metadata": {"window": "The size of the initial training dataset and the number\nof samples added at each iteration are the main hyperparameters of the experiment.\n While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. 
", "original_text": "In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. "}, "hash": "59a2b046f9c1274ab927e6550066cbb485fe7495f07d4cd54be3d15413667e5b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "dcb0925a-7174-46be-b129-6ec42e64675b", "node_type": "1", "metadata": {"window": "However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). ", "original_text": "The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. "}, "hash": "3bb470f72147609c85ae9614c7265a896af209856e14c3bc1176a12db12b96c8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. ", "mimetype": "text/plain", "start_char_idx": 46707, "end_char_idx": 46804, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "dcb0925a-7174-46be-b129-6ec42e64675b": {"__data__": {"id_": "dcb0925a-7174-46be-b129-6ec42e64675b", "embedding": null, "metadata": {"window": "However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". 
In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). ", "original_text": "The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "04467916-4416-4ccf-b050-6170335a3157", "node_type": "1", "metadata": {"window": "While the initial intended use for active learning was to select images from the moth traps, it\nbecame evident that ongoing efforts to label the images would not deliver enough data within\nthe time at my disposal. However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n", "original_text": "At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. 
"}, "hash": "7ac3ffb7b7a1a01abfcebf60c7d833401c57d6d5accab763c4b7246a4ddce8f8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "832b9eee-2266-4b1d-8b6b-64c35a4a1b90", "node_type": "1", "metadata": {"window": "In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. ", "original_text": "It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. "}, "hash": "9ee5d5d5fcb0fc8deebbae52c15f50e76521b6c92e977d55542db3d55cb8268c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. ", "mimetype": "text/plain", "start_char_idx": 46804, "end_char_idx": 46985, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "832b9eee-2266-4b1d-8b6b-64c35a4a1b90": {"__data__": {"id_": "832b9eee-2266-4b1d-8b6b-64c35a4a1b90", "embedding": null, "metadata": {"window": "In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. 
It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. ", "original_text": "It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "dcb0925a-7174-46be-b129-6ec42e64675b", "node_type": "1", "metadata": {"window": "However, it was still deemed valuable to develop the active learn-\ning framework and to test it on GBIF images. In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). ", "original_text": "The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. 
"}, "hash": "3bb470f72147609c85ae9614c7265a896af209856e14c3bc1176a12db12b96c8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "92e31890-eadb-4919-a7db-87bed8926fb3", "node_type": "1", "metadata": {"window": "In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. ", "original_text": "While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. "}, "hash": "5c597d2e51f7b6b4f1575ed19b94b7345fc97bc879debe5f1c20044d8fe308d2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. ", "mimetype": "text/plain", "start_char_idx": 46985, "end_char_idx": 47090, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "92e31890-eadb-4919-a7db-87bed8926fb3": {"__data__": {"id_": "92e31890-eadb-4919-a7db-87bed8926fb3", "embedding": null, "metadata": {"window": "In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. 
The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. ", "original_text": "While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "832b9eee-2266-4b1d-8b6b-64c35a4a1b90", "node_type": "1", "metadata": {"window": "In fact, as the standard evaluation procedure\nsuggests, besides effectively augmenting training datasets, active learning can also be used to\neffectively reduce training datasets: this operation is called \"training data subset search\". In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. ", "original_text": "It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. "}, "hash": "9ee5d5d5fcb0fc8deebbae52c15f50e76521b6c92e977d55542db3d55cb8268c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ae96b1b5-7ba0-4ab0-9731-24197ba56570", "node_type": "1", "metadata": {"window": "At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. 
While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n", "original_text": "The exploration of this method is left as a future research direction.\n"}, "hash": "dc306354c04fc4cfb7ea4aa069eb56c3f34bfd08ea89fb50c4ab8dd5a3b0b8fb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. ", "mimetype": "text/plain", "start_char_idx": 47090, "end_char_idx": 47268, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ae96b1b5-7ba0-4ab0-9731-24197ba56570": {"__data__": {"id_": "ae96b1b5-7ba0-4ab0-9731-24197ba56570", "embedding": null, "metadata": {"window": "At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. 
The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n", "original_text": "The exploration of this method is left as a future research direction.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "92e31890-eadb-4919-a7db-87bed8926fb3", "node_type": "1", "metadata": {"window": "In\n[36], using only half of the data, an interesting trick is used to achieve better performance than\n23\n\nthe model trained on the full dataset, which is highly-imbalanced: at each active learning it-\neration, scores are also computed for samples that are already in the training dataset; these\ncan eventually be selected again. At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. ", "original_text": "While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. "}, "hash": "5c597d2e51f7b6b4f1575ed19b94b7345fc97bc879debe5f1c20044d8fe308d2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9ca5eb17-b95d-40b7-8f5b-47ee10db6afa", "node_type": "1", "metadata": {"window": "The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. 
Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. ", "original_text": "The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). "}, "hash": "368fb24a24f52846b9f73d1f666d4ad652692af011185bae816e12b87299c053", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The exploration of this method is left as a future research direction.\n", "mimetype": "text/plain", "start_char_idx": 47268, "end_char_idx": 47339, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9ca5eb17-b95d-40b7-8f5b-47ee10db6afa": {"__data__": {"id_": "9ca5eb17-b95d-40b7-8f5b-47ee10db6afa", "embedding": null, "metadata": {"window": "The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. ", "original_text": "The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ae96b1b5-7ba0-4ab0-9731-24197ba56570", "node_type": "1", "metadata": {"window": "At the last iteration, the vast majority of the selected subset is\nsamples with multiple copies. The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n", "original_text": "The exploration of this method is left as a future research direction.\n"}, "hash": "dc306354c04fc4cfb7ea4aa069eb56c3f34bfd08ea89fb50c4ab8dd5a3b0b8fb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "02b11afa-ce2e-4aa2-9253-c1fe35302bd0", "node_type": "1", "metadata": {"window": "It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. 
", "original_text": "20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. "}, "hash": "2ddc776972f1f86b67d651c8c493b235f7ff265b6a22ca1d5fac566c3ccf000e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). ", "mimetype": "text/plain", "start_char_idx": 47339, "end_char_idx": 47558, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "02b11afa-ce2e-4aa2-9253-c1fe35302bd0": {"__data__": {"id_": "02b11afa-ce2e-4aa2-9253-c1fe35302bd0", "embedding": null, "metadata": {"window": "It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. ", "original_text": "20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9ca5eb17-b95d-40b7-8f5b-47ee10db6afa", "node_type": "1", "metadata": {"window": "The number of unique images is less than half of those in the full\ntraining dataset, yet the model trained on the selected subset outperforms the model trained on\nthe full dataset. It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. 
The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. ", "original_text": "The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). "}, "hash": "368fb24a24f52846b9f73d1f666d4ad652692af011185bae816e12b87299c053", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "409097e6-8d73-4a44-b2a6-56078b988ac6", "node_type": "1", "metadata": {"window": "While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n", "original_text": "Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. "}, "hash": "8352d050cfb7aaa8e4b4d0320b44b94fa8e5f6663b5886cbbc4f611784a0eb5d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. 
", "mimetype": "text/plain", "start_char_idx": 47558, "end_char_idx": 47727, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "409097e6-8d73-4a44-b2a6-56078b988ac6": {"__data__": {"id_": "409097e6-8d73-4a44-b2a6-56078b988ac6", "embedding": null, "metadata": {"window": "While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n", "original_text": "Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "02b11afa-ce2e-4aa2-9253-c1fe35302bd0", "node_type": "1", "metadata": {"window": "It is noted that the effectiveness of the approach is due to its ability to counter\nthe class imbalance. While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) 
is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. ", "original_text": "20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. "}, "hash": "2ddc776972f1f86b67d651c8c493b235f7ff265b6a22ca1d5fac566c3ccf000e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "119e3691-4bb7-4980-b1df-bf4a55324d6c", "node_type": "1", "metadata": {"window": "The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. ", "original_text": "The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n"}, "hash": "4fc4fa2c9df98ccda3d131f7a21974d1b2b0e43a0aa495fb4a96f1f7afcecd6c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. ", "mimetype": "text/plain", "start_char_idx": 47727, "end_char_idx": 47835, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "119e3691-4bb7-4980-b1df-bf4a55324d6c": {"__data__": {"id_": "119e3691-4bb7-4980-b1df-bf4a55324d6c", "embedding": null, "metadata": {"window": "The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. 
The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. ", "original_text": "The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "409097e6-8d73-4a44-b2a6-56078b988ac6", "node_type": "1", "metadata": {"window": "While this experiment was performed in the context of object detection,\nthis approach seems very relevant for our classification problem, as we also face severe class\nimbalance. The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n", "original_text": "Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. "}, "hash": "8352d050cfb7aaa8e4b4d0320b44b94fa8e5f6663b5886cbbc4f611784a0eb5d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bb24e12a-242f-46e0-af3b-8fdf8d5758fe", "node_type": "1", "metadata": {"window": "The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. 
Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. ", "original_text": "The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. "}, "hash": "92b10e70808ab5f7a12a3483588bc9561c5ec1cd6d51c2941dc4dfce06a10e56", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n", "mimetype": "text/plain", "start_char_idx": 47835, "end_char_idx": 47993, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bb24e12a-242f-46e0-af3b-8fdf8d5758fe": {"__data__": {"id_": "bb24e12a-242f-46e0-af3b-8fdf8d5758fe", "embedding": null, "metadata": {"window": "The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. ", "original_text": "The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "119e3691-4bb7-4980-b1df-bf4a55324d6c", "node_type": "1", "metadata": {"window": "The exploration of this method is left as a future research direction.\n The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. ", "original_text": "The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n"}, "hash": "4fc4fa2c9df98ccda3d131f7a21974d1b2b0e43a0aa495fb4a96f1f7afcecd6c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "936b39c1-b714-4718-961b-53dc7797d09d", "node_type": "1", "metadata": {"window": "20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. ", "original_text": "A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. 
"}, "hash": "4771933b5b1ff3722859447f17b6147e4e8e0aecce7603f2e3b3500bce6d3f47", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. ", "mimetype": "text/plain", "start_char_idx": 47993, "end_char_idx": 48260, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "936b39c1-b714-4718-961b-53dc7797d09d": {"__data__": {"id_": "936b39c1-b714-4718-961b-53dc7797d09d", "embedding": null, "metadata": {"window": "20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. ", "original_text": "A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bb24e12a-242f-46e0-af3b-8fdf8d5758fe", "node_type": "1", "metadata": {"window": "The dataset used for the experiment consists of around 600k GBIF images gathered for the\nmoth species classifier deployed in Quebec and Vermont (the two neighboring states share\nthe same list of 3150 relevant species). 20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. 
A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. ", "original_text": "The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. "}, "hash": "92b10e70808ab5f7a12a3483588bc9561c5ec1cd6d51c2941dc4dfce06a10e56", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "17b9cc56-82a1-4841-83ee-5dce2aa40635", "node_type": "1", "metadata": {"window": "Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. ", "original_text": "With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n"}, "hash": "42a50d97ee5f5b48b71a58aa8ab5f6937e08c2cfcd237da97983a161d3c93f06", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. ", "mimetype": "text/plain", "start_char_idx": 48260, "end_char_idx": 48409, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "17b9cc56-82a1-4841-83ee-5dce2aa40635": {"__data__": {"id_": "17b9cc56-82a1-4841-83ee-5dce2aa40635", "embedding": null, "metadata": {"window": "Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. 
First, we can look at scores distributions. ", "original_text": "With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "936b39c1-b714-4718-961b-53dc7797d09d", "node_type": "1", "metadata": {"window": "20% is allocated for the initial training dataset L, 20%\nis left-out for testing, and the remaining 60% constitutes the pool of data U from which to\nselect new samples. Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. ", "original_text": "A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. "}, "hash": "4771933b5b1ff3722859447f17b6147e4e8e0aecce7603f2e3b3500bce6d3f47", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "31b01bf0-3307-492c-afe0-7229c3d85fa3", "node_type": "1", "metadata": {"window": "The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. ", "original_text": "3.4. 
"}, "hash": "0ca4687bbe7d588facfd6bb206dd8da8c064f4651a16958be18d499ce96638bb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n", "mimetype": "text/plain", "start_char_idx": 48409, "end_char_idx": 48517, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "31b01bf0-3307-492c-afe0-7229c3d85fa3": {"__data__": {"id_": "31b01bf0-3307-492c-afe0-7229c3d85fa3", "embedding": null, "metadata": {"window": "The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. ", "original_text": "3.4. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "17b9cc56-82a1-4841-83ee-5dce2aa40635", "node_type": "1", "metadata": {"window": "Due to the time constraints, only the first iteration of a typical active\nlearning evaluation is performed. The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. ", "original_text": "With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n"}, "hash": "42a50d97ee5f5b48b71a58aa8ab5f6937e08c2cfcd237da97983a161d3c93f06", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8b2f129e-4481-44b9-adf7-fb2720e09ced", "node_type": "1", "metadata": {"window": "The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) 
is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. ", "original_text": "R ESULTS\n3.4.1. "}, "hash": "10b46d83e7c37cc2eed6c483738771052e4bcfe5fa09d0d177ba81bf881f2dbf", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "3.4. ", "mimetype": "text/plain", "start_char_idx": 48517, "end_char_idx": 48522, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8b2f129e-4481-44b9-adf7-fb2720e09ced": {"__data__": {"id_": "8b2f129e-4481-44b9-adf7-fb2720e09ced", "embedding": null, "metadata": {"window": "The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. ", "original_text": "R ESULTS\n3.4.1. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "31b01bf0-3307-492c-afe0-7229c3d85fa3", "node_type": "1", "metadata": {"window": "The ensemble configurations listed in Table 3.2 are created by\nlaunching five trainings onL with different random seeds, and saving the relevant checkpoints.\n The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. 
The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. ", "original_text": "3.4. "}, "hash": "0ca4687bbe7d588facfd6bb206dd8da8c064f4651a16958be18d499ce96638bb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "24859b29-a3ea-42c7-9577-f3e5f708c844", "node_type": "1", "metadata": {"window": "A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. ", "original_text": "Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. "}, "hash": "02de2fb0fbeacf9567bb60c41a19c14fbc2e4b0cbfcc79b5c70f6453f45fd813", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "R ESULTS\n3.4.1. ", "mimetype": "text/plain", "start_char_idx": 48522, "end_char_idx": 48538, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "24859b29-a3ea-42c7-9577-f3e5f708c844": {"__data__": {"id_": "24859b29-a3ea-42c7-9577-f3e5f708c844", "embedding": null, "metadata": {"window": "A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. ", "original_text": "Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8b2f129e-4481-44b9-adf7-fb2720e09ced", "node_type": "1", "metadata": {"window": "The model is a standard ResNet-50, and the training recipe (learning rate schedule, weight\ndecay, batch size, data augmentations...) is the same as that of the deployed model, only the\nnumber of epochs being adjusted given the reduction in the training dataset size. A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. 
Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. ", "original_text": "R ESULTS\n3.4.1. "}, "hash": "10b46d83e7c37cc2eed6c483738771052e4bcfe5fa09d0d177ba81bf881f2dbf", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "36570aa1-8771-4b18-b9d2-0c7d42b87ccb", "node_type": "1", "metadata": {"window": "With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. ", "original_text": "First, we can look at scores distributions. "}, "hash": "cc9d15ac04d44669197d3661d0d0433364b78c8b7f9252186614cb1700f1e0f2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. ", "mimetype": "text/plain", "start_char_idx": 48538, "end_char_idx": 48666, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "36570aa1-8771-4b18-b9d2-0c7d42b87ccb": {"__data__": {"id_": "36570aa1-8771-4b18-b9d2-0c7d42b87ccb", "embedding": null, "metadata": {"window": "With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. ", "original_text": "First, we can look at scores distributions. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "24859b29-a3ea-42c7-9577-f3e5f708c844", "node_type": "1", "metadata": {"window": "A python\nscript was developed to score GBIF images with any given scoring function listed in Table 3.1,\ngiven an arbitrary list of ResNet-50 models. With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. 
As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. ", "original_text": "Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. "}, "hash": "02de2fb0fbeacf9567bb60c41a19c14fbc2e4b0cbfcc79b5c70f6453f45fd813", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e4914e87-fae5-421e-ad75-3c21a73a6665", "node_type": "1", "metadata": {"window": "3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. ", "original_text": "The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. "}, "hash": "68b03d1e00486e260420e8155b3299984ed3222cea240d2024c36427fb9bacdd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "First, we can look at scores distributions. ", "mimetype": "text/plain", "start_char_idx": 48666, "end_char_idx": 48710, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e4914e87-fae5-421e-ad75-3c21a73a6665": {"__data__": {"id_": "e4914e87-fae5-421e-ad75-3c21a73a6665", "embedding": null, "metadata": {"window": "3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. ", "original_text": "The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "36570aa1-8771-4b18-b9d2-0c7d42b87ccb", "node_type": "1", "metadata": {"window": "With that, the score of every image inU is obtained\nusing each ensemble configuration and scoring function.\n 3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. 
This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. ", "original_text": "First, we can look at scores distributions. "}, "hash": "cc9d15ac04d44669197d3661d0d0433364b78c8b7f9252186614cb1700f1e0f2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "abe8e5cb-f99a-4c55-97a2-3f4a6080beb8", "node_type": "1", "metadata": {"window": "R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n", "original_text": "As expected, both are long-tailed distributions. "}, "hash": "5c596ea8db4d28696d13b588afd99f92c9b16cc5dd71b0da453fa232d20408c7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. ", "mimetype": "text/plain", "start_char_idx": 48710, "end_char_idx": 48790, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "abe8e5cb-f99a-4c55-97a2-3f4a6080beb8": {"__data__": {"id_": "abe8e5cb-f99a-4c55-97a2-3f4a6080beb8", "embedding": null, "metadata": {"window": "R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n", "original_text": "As expected, both are long-tailed distributions. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e4914e87-fae5-421e-ad75-3c21a73a6665", "node_type": "1", "metadata": {"window": "3.4. R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. 
The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. ", "original_text": "The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. "}, "hash": "68b03d1e00486e260420e8155b3299984ed3222cea240d2024c36427fb9bacdd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "25b4d4bd-c952-4e40-9eed-af7eadd747d9", "node_type": "1", "metadata": {"window": "Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. ", "original_text": "This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. "}, "hash": "41a838c1ba1a9ae03d4453de99f3960d80f5982b2e04d392f7cf5d7b258b6271", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As expected, both are long-tailed distributions. ", "mimetype": "text/plain", "start_char_idx": 48790, "end_char_idx": 48839, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "25b4d4bd-c952-4e40-9eed-af7eadd747d9": {"__data__": {"id_": "25b4d4bd-c952-4e40-9eed-af7eadd747d9", "embedding": null, "metadata": {"window": "Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. ", "original_text": "This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "abe8e5cb-f99a-4c55-97a2-3f4a6080beb8", "node_type": "1", "metadata": {"window": "R ESULTS\n3.4.1. Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n", "original_text": "As expected, both are long-tailed distributions. "}, "hash": "5c596ea8db4d28696d13b588afd99f92c9b16cc5dd71b0da453fa232d20408c7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0aecc149-6458-4984-89c2-daab8a9a2d18", "node_type": "1", "metadata": {"window": "First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. ", "original_text": "The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. "}, "hash": "a959ead689b0a32ad437adc40ab7f7d2521a33afefe3dbb1972e6f02cac5cbff", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. ", "mimetype": "text/plain", "start_char_idx": 48839, "end_char_idx": 48921, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0aecc149-6458-4984-89c2-daab8a9a2d18": {"__data__": {"id_": "0aecc149-6458-4984-89c2-daab8a9a2d18", "embedding": null, "metadata": {"window": "First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. 
The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. ", "original_text": "The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "25b4d4bd-c952-4e40-9eed-af7eadd747d9", "node_type": "1", "metadata": {"window": "Scores distributions\nThe analysis of the scores gives several insights, and it is also an opportunity to perform sanity\nchecks. First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. ", "original_text": "This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. "}, "hash": "41a838c1ba1a9ae03d4453de99f3960d80f5982b2e04d392f7cf5d7b258b6271", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ba5811fb-4ad0-46ec-bed7-9aa0491a71b8", "node_type": "1", "metadata": {"window": "The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. 
Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n", "original_text": "Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. "}, "hash": "d9cc8208d9908fe306d7207d33277e74de6babb3e3b451a41924234771a6b457", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. ", "mimetype": "text/plain", "start_char_idx": 48921, "end_char_idx": 49012, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ba5811fb-4ad0-46ec-bed7-9aa0491a71b8": {"__data__": {"id_": "ba5811fb-4ad0-46ec-bed7-9aa0491a71b8", "embedding": null, "metadata": {"window": "The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n", "original_text": "Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0aecc149-6458-4984-89c2-daab8a9a2d18", "node_type": "1", "metadata": {"window": "First, we can look at scores distributions. The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. 
The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. ", "original_text": "The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. "}, "hash": "a959ead689b0a32ad437adc40ab7f7d2521a33afefe3dbb1972e6f02cac5cbff", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f131a84a-7b7f-4220-87a6-b71617cacfb5", "node_type": "1", "metadata": {"window": "As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. ", "original_text": "Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n"}, "hash": "c9f060287b1f0c97981d7845259200bf120cc6d7d4354365a1a695f6cff14321", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. ", "mimetype": "text/plain", "start_char_idx": 49012, "end_char_idx": 49193, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f131a84a-7b7f-4220-87a6-b71617cacfb5": {"__data__": {"id_": "f131a84a-7b7f-4220-87a6-b71617cacfb5", "embedding": null, "metadata": {"window": "As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. 
Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. ", "original_text": "Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ba5811fb-4ad0-46ec-bed7-9aa0491a71b8", "node_type": "1", "metadata": {"window": "The mutual information distributions from\n20lastand 5best is given in Fig.3.3a. As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n", "original_text": "Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. "}, "hash": "d9cc8208d9908fe306d7207d33277e74de6babb3e3b451a41924234771a6b457", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c50d2dbc-59bf-4da4-9705-07120474cd09", "node_type": "1", "metadata": {"window": "This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. 
", "original_text": "The entropy distributions from single and 5best is displayed in Fig.3.3b. "}, "hash": "9d71c5bcd0bb00d7807aa8259eed5ef118ce722a27ebfe251df5af03e43f451d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n", "mimetype": "text/plain", "start_char_idx": 49193, "end_char_idx": 49318, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c50d2dbc-59bf-4da4-9705-07120474cd09": {"__data__": {"id_": "c50d2dbc-59bf-4da4-9705-07120474cd09", "embedding": null, "metadata": {"window": "This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. ", "original_text": "The entropy distributions from single and 5best is displayed in Fig.3.3b. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f131a84a-7b7f-4220-87a6-b71617cacfb5", "node_type": "1", "metadata": {"window": "As expected, both are long-tailed distributions. This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. 
Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. ", "original_text": "Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n"}, "hash": "c9f060287b1f0c97981d7845259200bf120cc6d7d4354365a1a695f6cff14321", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "21b11587-dd3a-4cc2-a120-5d92c953e2d8", "node_type": "1", "metadata": {"window": "The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. ", "original_text": "The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. "}, "hash": "bf0de52ce70907f4ed0f57605624ed32bca93a28cc24be8249e5932b8bbe5bfc", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The entropy distributions from single and 5best is displayed in Fig.3.3b. ", "mimetype": "text/plain", "start_char_idx": 49318, "end_char_idx": 49392, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "21b11587-dd3a-4cc2-a120-5d92c953e2d8": {"__data__": {"id_": "21b11587-dd3a-4cc2-a120-5d92c953e2d8", "embedding": null, "metadata": {"window": "The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. 
Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. ", "original_text": "The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c50d2dbc-59bf-4da4-9705-07120474cd09", "node_type": "1", "metadata": {"window": "This is\nmore pronounced for 5best, suggesting that this ensemble is more diverse. The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. ", "original_text": "The entropy distributions from single and 5best is displayed in Fig.3.3b. "}, "hash": "9d71c5bcd0bb00d7807aa8259eed5ef118ce722a27ebfe251df5af03e43f451d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0008d989-47ce-41a5-8ea1-d07bd161ac55", "node_type": "1", "metadata": {"window": "Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. 
Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. ", "original_text": "Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n"}, "hash": "94e11e7a5ee4c0c83e68fc818e4345682a2afd183104f96a906cd1f3abff414e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. ", "mimetype": "text/plain", "start_char_idx": 49392, "end_char_idx": 49596, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0008d989-47ce-41a5-8ea1-d07bd161ac55": {"__data__": {"id_": "0008d989-47ce-41a5-8ea1-d07bd161ac55", "embedding": null, "metadata": {"window": "Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. ", "original_text": "Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "21b11587-dd3a-4cc2-a120-5d92c953e2d8", "node_type": "1", "metadata": {"window": "The distribution for\n5ckpt, not shown for clarity, is slightly closer to zero than 20last. Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. 
The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. ", "original_text": "The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. "}, "hash": "bf0de52ce70907f4ed0f57605624ed32bca93a28cc24be8249e5932b8bbe5bfc", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7d6fec03-c63b-4db5-874e-7f2b0e53824b", "node_type": "1", "metadata": {"window": "Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n", "original_text": "The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. "}, "hash": "77cf8aadbaa8d982c20838547cb5ae7a4c4772adf60d00995d3fa20712a438c6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n", "mimetype": "text/plain", "start_char_idx": 49596, "end_char_idx": 49728, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7d6fec03-c63b-4db5-874e-7f2b0e53824b": {"__data__": {"id_": "7d6fec03-c63b-4db5-874e-7f2b0e53824b", "embedding": null, "metadata": {"window": "Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. 
Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n", "original_text": "The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0008d989-47ce-41a5-8ea1-d07bd161ac55", "node_type": "1", "metadata": {"window": "Given that mutual informa-\ntion\u2019s maximal value corresponds to the entropy\u2019s maximal value, which is log2(3150) = 11.6,\none can wonder if even the 5best ensemble is diverse enough. Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. ", "original_text": "Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n"}, "hash": "94e11e7a5ee4c0c83e68fc818e4345682a2afd183104f96a906cd1f3abff414e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7b38d1db-a8dd-49d2-9e4a-5410e0462a60", "node_type": "1", "metadata": {"window": "The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. 
This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n", "original_text": "Surprisingly, there are two peaks:\none at the minimum and one at the maximum. "}, "hash": "8b4ff53307c0743588fd1c922de9d481596e80e41271720df1ce03743281a16b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. ", "mimetype": "text/plain", "start_char_idx": 49728, "end_char_idx": 49885, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7b38d1db-a8dd-49d2-9e4a-5410e0462a60": {"__data__": {"id_": "7b38d1db-a8dd-49d2-9e4a-5410e0462a60", "embedding": null, "metadata": {"window": "The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n", "original_text": "Surprisingly, there are two peaks:\none at the minimum and one at the maximum. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7d6fec03-c63b-4db5-874e-7f2b0e53824b", "node_type": "1", "metadata": {"window": "Perhaps a different split between\ntraining and validation datasets across the five training runs would have been beneficial.\n The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. 
Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n", "original_text": "The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. "}, "hash": "77cf8aadbaa8d982c20838547cb5ae7a4c4772adf60d00995d3fa20712a438c6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ece62c27-00c5-4907-8204-44f42aa4f83c", "node_type": "1", "metadata": {"window": "The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. ", "original_text": "This suggests that for many images, it is hard to\ndistinguish between two classes. "}, "hash": "ca85de52801f2e2a382f4cea60fa78e7f8b4b00dd355a9b5f8e0b2b2622145cb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Surprisingly, there are two peaks:\none at the minimum and one at the maximum. ", "mimetype": "text/plain", "start_char_idx": 49885, "end_char_idx": 49963, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ece62c27-00c5-4907-8204-44f42aa4f83c": {"__data__": {"id_": "ece62c27-00c5-4907-8204-44f42aa4f83c", "embedding": null, "metadata": {"window": "The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. 
", "original_text": "This suggests that for many images, it is hard to\ndistinguish between two classes. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7b38d1db-a8dd-49d2-9e4a-5410e0462a60", "node_type": "1", "metadata": {"window": "The entropy distributions from single and 5best is displayed in Fig.3.3b. The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n", "original_text": "Surprisingly, there are two peaks:\none at the minimum and one at the maximum. "}, "hash": "8b4ff53307c0743588fd1c922de9d481596e80e41271720df1ce03743281a16b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4191c316-bd5f-47aa-a4da-28ec81d7de88", "node_type": "1", "metadata": {"window": "Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. ", "original_text": "Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. "}, "hash": "cf9eed25d253ff936b09faf6352b60d5f67777f1f81e6593b710a3d71d4aa920", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "This suggests that for many images, it is hard to\ndistinguish between two classes. 
", "mimetype": "text/plain", "start_char_idx": 49963, "end_char_idx": 50046, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4191c316-bd5f-47aa-a4da-28ec81d7de88": {"__data__": {"id_": "4191c316-bd5f-47aa-a4da-28ec81d7de88", "embedding": null, "metadata": {"window": "Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. ", "original_text": "Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ece62c27-00c5-4907-8204-44f42aa4f83c", "node_type": "1", "metadata": {"window": "The two distributions\nare very similar, except that the distribution fromsingle has a 40% higher peak near zero, mean-\ning that the single model has given much more predictions with very high confidence. Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. ", "original_text": "This suggests that for many images, it is hard to\ndistinguish between two classes. "}, "hash": "ca85de52801f2e2a382f4cea60fa78e7f8b4b00dd355a9b5f8e0b2b2622145cb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7eb0f7f7-7af8-45b8-b316-74589a8376a1", "node_type": "1", "metadata": {"window": "The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. 
The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. ", "original_text": "The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n"}, "hash": "146b741479267b88ec7ce46012789ab43ee92b28eb7af4e8e081972e94e3a5c8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. ", "mimetype": "text/plain", "start_char_idx": 50046, "end_char_idx": 50138, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7eb0f7f7-7af8-45b8-b316-74589a8376a1": {"__data__": {"id_": "7eb0f7f7-7af8-45b8-b316-74589a8376a1", "embedding": null, "metadata": {"window": "The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. ", "original_text": "The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4191c316-bd5f-47aa-a4da-28ec81d7de88", "node_type": "1", "metadata": {"window": "Unless\nthese predictions are correct, this seems to confirm the idea that a single model\u2019s estimation of\nuncertainty is unreliable.\n The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. 
", "original_text": "Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. "}, "hash": "cf9eed25d253ff936b09faf6352b60d5f67777f1f81e6593b710a3d71d4aa920", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2e2b91d9-0b45-4dbc-a9b0-b507a2acec02", "node_type": "1", "metadata": {"window": "Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. ", "original_text": "24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n"}, "hash": "e4834cae0657ee853f4a9645d2cfdaab5966ad83ab96c700fdefbb648f8be48b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n", "mimetype": "text/plain", "start_char_idx": 50138, "end_char_idx": 50261, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2e2b91d9-0b45-4dbc-a9b0-b507a2acec02": {"__data__": {"id_": "2e2b91d9-0b45-4dbc-a9b0-b507a2acec02", "embedding": null, "metadata": {"window": "Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. ", "original_text": "24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7eb0f7f7-7af8-45b8-b316-74589a8376a1", "node_type": "1", "metadata": {"window": "The margin sampling distribution from 20last is presented in Fig.3.3c, as a good representation\nof the distributions from all other ensemble configurations. Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. 
Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. ", "original_text": "The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n"}, "hash": "146b741479267b88ec7ce46012789ab43ee92b28eb7af4e8e081972e94e3a5c8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "cb37433a-bb55-47c9-8696-889f36d9a188", "node_type": "1", "metadata": {"window": "This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. ", "original_text": "3.4.2. "}, "hash": "27c817c5fdb8a774f423ae56c100d5c0104bd2b7cbb621a662e91eb8745cdd47", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n", "mimetype": "text/plain", "start_char_idx": 50261, "end_char_idx": 50366, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "cb37433a-bb55-47c9-8696-889f36d9a188": {"__data__": {"id_": "cb37433a-bb55-47c9-8696-889f36d9a188", "embedding": null, "metadata": {"window": "This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. 
While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. ", "original_text": "3.4.2. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2e2b91d9-0b45-4dbc-a9b0-b507a2acec02", "node_type": "1", "metadata": {"window": "Surprisingly, there are two peaks:\none at the minimum and one at the maximum. This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. ", "original_text": "24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n"}, "hash": "e4834cae0657ee853f4a9645d2cfdaab5966ad83ab96c700fdefbb648f8be48b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9397ae15-7e37-4d6b-8e7e-2c32ee4a1805", "node_type": "1", "metadata": {"window": "Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. ", "original_text": "Correlation between scores\nAnother point of interest is the correlation between scores. "}, "hash": "7e8d823a43a73d4b3a481253f809b6ee3d48947ab2289d6f0e071a0d77fa0fc9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "3.4.2. 
", "mimetype": "text/plain", "start_char_idx": 50366, "end_char_idx": 50373, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9397ae15-7e37-4d6b-8e7e-2c32ee4a1805": {"__data__": {"id_": "9397ae15-7e37-4d6b-8e7e-2c32ee4a1805", "embedding": null, "metadata": {"window": "Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. ", "original_text": "Correlation between scores\nAnother point of interest is the correlation between scores. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "cb37433a-bb55-47c9-8696-889f36d9a188", "node_type": "1", "metadata": {"window": "This suggests that for many images, it is hard to\ndistinguish between two classes. Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. ", "original_text": "3.4.2. "}, "hash": "27c817c5fdb8a774f423ae56c100d5c0104bd2b7cbb621a662e91eb8745cdd47", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a35de6b2-34ee-4b8e-a838-d2bffb62bf27", "node_type": "1", "metadata": {"window": "The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. 
To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. ", "original_text": "To some extent, this can be dis-\nplayed in 2D histograms. "}, "hash": "66e4231f1085883887bc4bf8b0e654ddd509ad110444076defbb2d0a1ed54271", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Correlation between scores\nAnother point of interest is the correlation between scores. ", "mimetype": "text/plain", "start_char_idx": 50373, "end_char_idx": 50461, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a35de6b2-34ee-4b8e-a838-d2bffb62bf27": {"__data__": {"id_": "a35de6b2-34ee-4b8e-a838-d2bffb62bf27", "embedding": null, "metadata": {"window": "The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. ", "original_text": "To some extent, this can be dis-\nplayed in 2D histograms. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9397ae15-7e37-4d6b-8e7e-2c32ee4a1805", "node_type": "1", "metadata": {"window": "Finally, the variation ratios distribution is shown in Fig.3.3d,\nfor illustrative purposes. The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. 
In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. ", "original_text": "Correlation between scores\nAnother point of interest is the correlation between scores. "}, "hash": "7e8d823a43a73d4b3a481253f809b6ee3d48947ab2289d6f0e071a0d77fa0fc9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a363da84-0efa-4250-8956-07f98d870449", "node_type": "1", "metadata": {"window": "24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n", "original_text": "In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. "}, "hash": "3ec3dd03d4e0862db995fa6ab4ef9f3c7de8cfc5076d1704ea40a376bc79c806", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "To some extent, this can be dis-\nplayed in 2D histograms. ", "mimetype": "text/plain", "start_char_idx": 50461, "end_char_idx": 50519, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a363da84-0efa-4250-8956-07f98d870449": {"__data__": {"id_": "a363da84-0efa-4250-8956-07f98d870449", "embedding": null, "metadata": {"window": "24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. 
As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n", "original_text": "In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a35de6b2-34ee-4b8e-a838-d2bffb62bf27", "node_type": "1", "metadata": {"window": "The least confidence distribution (which is also long-tailed, although\nless so than the entropy\u2019s) is omitted for brevity.\n 24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. ", "original_text": "To some extent, this can be dis-\nplayed in 2D histograms. "}, "hash": "66e4231f1085883887bc4bf8b0e654ddd509ad110444076defbb2d0a1ed54271", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9cf97173-98a8-417e-a319-eae2ab22fce8", "node_type": "1", "metadata": {"window": "3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n", "original_text": "While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. 
"}, "hash": "e4b24106d707dbd5dff7b368f32aa7be29c62c6f663fbdc031e2fd76979d0590", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. ", "mimetype": "text/plain", "start_char_idx": 50519, "end_char_idx": 50626, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9cf97173-98a8-417e-a319-eae2ab22fce8": {"__data__": {"id_": "9cf97173-98a8-417e-a319-eae2ab22fce8", "embedding": null, "metadata": {"window": "3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n", "original_text": "While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a363da84-0efa-4250-8956-07f98d870449", "node_type": "1", "metadata": {"window": "24\n\n(a)\n (b)\n(c)\n (d)\nFigure 3.3: Distribution of different scores with various ensemble configurations.\n 3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. 
As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n", "original_text": "In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. "}, "hash": "3ec3dd03d4e0862db995fa6ab4ef9f3c7de8cfc5076d1704ea40a376bc79c806", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8a6dd772-4525-4a20-87d2-c9e7ffd920d0", "node_type": "1", "metadata": {"window": "Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. ", "original_text": "The other two ensemble configurations, 20last and 5ckpt, give very similar plots. "}, "hash": "b224fde266a4740089ccb4a0f5191f9b0fa9197933826fee32c4a4aeebfae290", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. ", "mimetype": "text/plain", "start_char_idx": 50626, "end_char_idx": 50975, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8a6dd772-4525-4a20-87d2-c9e7ffd920d0": {"__data__": {"id_": "8a6dd772-4525-4a20-87d2-c9e7ffd920d0", "embedding": null, "metadata": {"window": "Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. 
As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. ", "original_text": "The other two ensemble configurations, 20last and 5ckpt, give very similar plots. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9cf97173-98a8-417e-a319-eae2ab22fce8", "node_type": "1", "metadata": {"window": "3.4.2. Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n", "original_text": "While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. "}, "hash": "e4b24106d707dbd5dff7b368f32aa7be29c62c6f663fbdc031e2fd76979d0590", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "78a7aef0-2ea7-4f55-83f0-dd1319f282d5", "node_type": "1", "metadata": {"window": "To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. 
As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. ", "original_text": "In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. "}, "hash": "cd9f006b816818881086d1d6e5043b409fbbd2887aea43597af49550bf6b4f51", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The other two ensemble configurations, 20last and 5ckpt, give very similar plots. ", "mimetype": "text/plain", "start_char_idx": 50975, "end_char_idx": 51057, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "78a7aef0-2ea7-4f55-83f0-dd1319f282d5": {"__data__": {"id_": "78a7aef0-2ea7-4f55-83f0-dd1319f282d5", "embedding": null, "metadata": {"window": "To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. ", "original_text": "In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8a6dd772-4525-4a20-87d2-c9e7ffd920d0", "node_type": "1", "metadata": {"window": "Correlation between scores\nAnother point of interest is the correlation between scores. To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. 
While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. ", "original_text": "The other two ensemble configurations, 20last and 5ckpt, give very similar plots. "}, "hash": "b224fde266a4740089ccb4a0f5191f9b0fa9197933826fee32c4a4aeebfae290", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4a0d157c-3379-4047-96e9-4a52fb403ac1", "node_type": "1", "metadata": {"window": "In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n", "original_text": "As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n"}, "hash": "3b17e30cd7125f24fcd76ae892a57d5eaf536e77b68925ee1e6046e842b47af0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. 
", "mimetype": "text/plain", "start_char_idx": 51057, "end_char_idx": 51161, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4a0d157c-3379-4047-96e9-4a52fb403ac1": {"__data__": {"id_": "4a0d157c-3379-4047-96e9-4a52fb403ac1", "embedding": null, "metadata": {"window": "In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n", "original_text": "As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "78a7aef0-2ea7-4f55-83f0-dd1319f282d5", "node_type": "1", "metadata": {"window": "To some extent, this can be dis-\nplayed in 2D histograms. In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. 
For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. ", "original_text": "In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. "}, "hash": "cd9f006b816818881086d1d6e5043b409fbbd2887aea43597af49550bf6b4f51", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "13dded4f-225a-4a43-a4ea-b552be510614", "node_type": "1", "metadata": {"window": "While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. ", "original_text": "In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n"}, "hash": "1168d97081d2d51c6b98b2beb9d4ba1e2fdde072dadf6e3fb2199700fed71559", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n", "mimetype": "text/plain", "start_char_idx": 51161, "end_char_idx": 51259, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "13dded4f-225a-4a43-a4ea-b552be510614": {"__data__": {"id_": "13dded4f-225a-4a43-a4ea-b552be510614", "embedding": null, "metadata": {"window": "While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. 
As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. ", "original_text": "In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4a0d157c-3379-4047-96e9-4a52fb403ac1", "node_type": "1", "metadata": {"window": "In Fig.3.4, joint distributions of different scores from the 5best en-\nsemble configuration are displayed. While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n", "original_text": "As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n"}, "hash": "3b17e30cd7125f24fcd76ae892a57d5eaf536e77b68925ee1e6046e842b47af0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8bcf0b40-86c8-43f8-8165-8350447e3692", "node_type": "1", "metadata": {"window": "The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. 
As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n", "original_text": "Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. "}, "hash": "7fd20316320f70194c9b0bedf936fc00422653f666c569a96d9172b017740fb2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n", "mimetype": "text/plain", "start_char_idx": 51259, "end_char_idx": 51453, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8bcf0b40-86c8-43f8-8165-8350447e3692": {"__data__": {"id_": "8bcf0b40-86c8-43f8-8165-8350447e3692", "embedding": null, "metadata": {"window": "The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n", "original_text": "Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "13dded4f-225a-4a43-a4ea-b552be510614", "node_type": "1", "metadata": {"window": "While the vast majority of samples is concentrated at low\nscores, for the higher scores least confidence and entropy appear significantly more correlated\nthan entropy and mutual information; this is unsurprising, given that least confidence and en-\ntropy are conceptually similar, while entropy and mutual information are fundamentally differ-\nent. The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. ", "original_text": "In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n"}, "hash": "1168d97081d2d51c6b98b2beb9d4ba1e2fdde072dadf6e3fb2199700fed71559", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "59331595-90e9-48ea-93c5-4dae43721d24", "node_type": "1", "metadata": {"window": "In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. 
Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. ", "original_text": "For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. "}, "hash": "0693d55e6d4667bd26b69aa5568fd599f8ae19a27c02b2899404d07398cf133d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. ", "mimetype": "text/plain", "start_char_idx": 51453, "end_char_idx": 51632, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "59331595-90e9-48ea-93c5-4dae43721d24": {"__data__": {"id_": "59331595-90e9-48ea-93c5-4dae43721d24", "embedding": null, "metadata": {"window": "In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. ", "original_text": "For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8bcf0b40-86c8-43f8-8165-8350447e3692", "node_type": "1", "metadata": {"window": "The other two ensemble configurations, 20last and 5ckpt, give very similar plots. In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. 
As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n", "original_text": "Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. "}, "hash": "7fd20316320f70194c9b0bedf936fc00422653f666c569a96d9172b017740fb2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6b96aa91-3327-4d94-a271-155962b1cc7f", "node_type": "1", "metadata": {"window": "As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n", "original_text": "As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n"}, "hash": "9b63c7326d322198859bf3254c734290a358b79ccd1c922abd46ccaac2733e87", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. 
", "mimetype": "text/plain", "start_char_idx": 51632, "end_char_idx": 51752, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6b96aa91-3327-4d94-a271-155962b1cc7f": {"__data__": {"id_": "6b96aa91-3327-4d94-a271-155962b1cc7f", "embedding": null, "metadata": {"window": "As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n", "original_text": "As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "59331595-90e9-48ea-93c5-4dae43721d24", "node_type": "1", "metadata": {"window": "In Fig.3.5,\njoint distributions of the same score from different ensemble configurations are presented. As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. 
", "original_text": "For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. "}, "hash": "0693d55e6d4667bd26b69aa5568fd599f8ae19a27c02b2899404d07398cf133d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d6c90984-f636-4d04-a29a-c6324e0bf2ae", "node_type": "1", "metadata": {"window": "In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. ", "original_text": "25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. "}, "hash": "c498ba3a4d23571f534965da6fb07c3552c68fecafeeb7eb6c4b0e92ca3c408f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n", "mimetype": "text/plain", "start_char_idx": 51752, "end_char_idx": 51887, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d6c90984-f636-4d04-a29a-c6324e0bf2ae": {"__data__": {"id_": "d6c90984-f636-4d04-a29a-c6324e0bf2ae", "embedding": null, "metadata": {"window": "In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. 
As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. ", "original_text": "25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6b96aa91-3327-4d94-a271-155962b1cc7f", "node_type": "1", "metadata": {"window": "As\nexpected, 20last and 5ckpt present high correlations, both for entropy and mutual information.\n In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n", "original_text": "As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n"}, "hash": "9b63c7326d322198859bf3254c734290a358b79ccd1c922abd46ccaac2733e87", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "38cdb584-5486-4857-b69d-0bd473e6bbee", "node_type": "1", "metadata": {"window": "Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. 
For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n", "original_text": "Spearman\u2019s rank correlation coefficient \u03c1 is given.\n"}, "hash": "1ebe7a1300a17051fc888b5a1da99ebef5ed1650b4809a6b8e7559628a36f2ca", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. ", "mimetype": "text/plain", "start_char_idx": 51887, "end_char_idx": 52035, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "38cdb584-5486-4857-b69d-0bd473e6bbee": {"__data__": {"id_": "38cdb584-5486-4857-b69d-0bd473e6bbee", "embedding": null, "metadata": {"window": "Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. 
Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n", "original_text": "Spearman\u2019s rank correlation coefficient \u03c1 is given.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d6c90984-f636-4d04-a29a-c6324e0bf2ae", "node_type": "1", "metadata": {"window": "In contrast, 20last and 5best present lower correlation, especially for mutual information, sug-\ngesting that 20last might not be as good as5best (which is the standard ensemble configuration).\n Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. ", "original_text": "25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. "}, "hash": "c498ba3a4d23571f534965da6fb07c3552c68fecafeeb7eb6c4b0e92ca3c408f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "90b08268-8e7f-4f3f-bbba-37a3309c088d", "node_type": "1", "metadata": {"window": "For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. 
Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. ", "original_text": "(a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. "}, "hash": "974deeea697d994cfdf9876e70343f5567cb5a40ab32683929594770c6688446", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Spearman\u2019s rank correlation coefficient \u03c1 is given.\n", "mimetype": "text/plain", "start_char_idx": 52035, "end_char_idx": 52087, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "90b08268-8e7f-4f3f-bbba-37a3309c088d": {"__data__": {"id_": "90b08268-8e7f-4f3f-bbba-37a3309c088d", "embedding": null, "metadata": {"window": "For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. ", "original_text": "(a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "38cdb584-5486-4857-b69d-0bd473e6bbee", "node_type": "1", "metadata": {"window": "Finally, for each scoring function and ensemble configuration, the images in U are ranked,\nand the top-K are selected to be added to L. K is chosen such that K = | U|/9 = | L|/3. For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n", "original_text": "Spearman\u2019s rank correlation coefficient \u03c1 is given.\n"}, "hash": "1ebe7a1300a17051fc888b5a1da99ebef5ed1650b4809a6b8e7559628a36f2ca", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f357aec5-6b19-4d4b-a33c-7af27d7a9c0e", "node_type": "1", "metadata": {"window": "As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. 
Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n", "original_text": "Spearman\u2019s rank correlation coefficient\u03c1 is given.\n"}, "hash": "664da35da316036f270faf541ae6a29625bb699e4f62b53a624ca9f337906194", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "(a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. ", "mimetype": "text/plain", "start_char_idx": 52087, "end_char_idx": 52258, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f357aec5-6b19-4d4b-a33c-7af27d7a9c0e": {"__data__": {"id_": "f357aec5-6b19-4d4b-a33c-7af27d7a9c0e", "embedding": null, "metadata": {"window": "As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n", "original_text": "Spearman\u2019s rank correlation coefficient\u03c1 is given.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "90b08268-8e7f-4f3f-bbba-37a3309c088d", "node_type": "1", "metadata": {"window": "For\nsimplicity, only the entropy and mutual information scoring functions are discussed for the\nrest of the experiment. As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. 
Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. ", "original_text": "(a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. "}, "hash": "974deeea697d994cfdf9876e70343f5567cb5a40ab32683929594770c6688446", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5ccce43f-a9bf-45e0-9e87-1f22e8c9b2ef", "node_type": "1", "metadata": {"window": "25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. ", "original_text": "26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. 
"}, "hash": "d74d18af506a7b4fa2be9cdc1f2482f09d06cfdb3ede4e141e923b819f53ef09", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Spearman\u2019s rank correlation coefficient\u03c1 is given.\n", "mimetype": "text/plain", "start_char_idx": 52258, "end_char_idx": 52309, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5ccce43f-a9bf-45e0-9e87-1f22e8c9b2ef": {"__data__": {"id_": "5ccce43f-a9bf-45e0-9e87-1f22e8c9b2ef", "embedding": null, "metadata": {"window": "25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. ", "original_text": "26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f357aec5-6b19-4d4b-a33c-7af27d7a9c0e", "node_type": "1", "metadata": {"window": "As a sanity check, the overlap between the newly selected datasets is\npresented in Table3.3 in terms of Intersection over Union (IoU).\n 25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. 
Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n", "original_text": "Spearman\u2019s rank correlation coefficient\u03c1 is given.\n"}, "hash": "664da35da316036f270faf541ae6a29625bb699e4f62b53a624ca9f337906194", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f97c811a-03ec-418b-8eb3-e24161f195f2", "node_type": "1", "metadata": {"window": "Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. ", "original_text": "Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n"}, "hash": "31317f6f234053aea3724269158d85e61470ea0a2114c682f7a3984a8993cf08", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. 
", "mimetype": "text/plain", "start_char_idx": 52309, "end_char_idx": 52754, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f97c811a-03ec-418b-8eb3-e24161f195f2": {"__data__": {"id_": "f97c811a-03ec-418b-8eb3-e24161f195f2", "embedding": null, "metadata": {"window": "Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. ", "original_text": "Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5ccce43f-a9bf-45e0-9e87-1f22e8c9b2ef", "node_type": "1", "metadata": {"window": "25\n\n(a) \u03c1 = 0.99\n (b) \u03c1 = 0.94\nFigure 3.4: 2D histograms displaying the correlation between different scores, from the same\nensemble configuration. Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. 
", "original_text": "26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. "}, "hash": "d74d18af506a7b4fa2be9cdc1f2482f09d06cfdb3ede4e141e923b819f53ef09", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "57f0902e-649d-43b6-a41b-d6031f2a572e", "node_type": "1", "metadata": {"window": "(a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n", "original_text": "In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. "}, "hash": "5879915ef616fe841d0bd5d2a2f4b1a2db46afa7300221adeed064e1b9c5632e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n", "mimetype": "text/plain", "start_char_idx": 52754, "end_char_idx": 52879, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "57f0902e-649d-43b6-a41b-d6031f2a572e": {"__data__": {"id_": "57f0902e-649d-43b6-a41b-d6031f2a572e", "embedding": null, "metadata": {"window": "(a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. 
Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n", "original_text": "In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f97c811a-03ec-418b-8eb3-e24161f195f2", "node_type": "1", "metadata": {"window": "Spearman\u2019s rank correlation coefficient \u03c1 is given.\n (a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. ", "original_text": "Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n"}, "hash": "31317f6f234053aea3724269158d85e61470ea0a2114c682f7a3984a8993cf08", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c5589445-716f-48e7-b5f4-e50f81a8b5e8", "node_type": "1", "metadata": {"window": "Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. 
Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. ", "original_text": "Three of these images are shared between subsets.\n"}, "hash": "a4e28421fef963894a3819b7827c5fe14936dbb16d4ec4a42337205285717724", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. ", "mimetype": "text/plain", "start_char_idx": 52879, "end_char_idx": 53016, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c5589445-716f-48e7-b5f4-e50f81a8b5e8": {"__data__": {"id_": "c5589445-716f-48e7-b5f4-e50f81a8b5e8", "embedding": null, "metadata": {"window": "Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. ", "original_text": "Three of these images are shared between subsets.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "57f0902e-649d-43b6-a41b-d6031f2a572e", "node_type": "1", "metadata": {"window": "(a) \u03c1 = 0.92\n (b) \u03c1 = 0.99\n(c) \u03c1 = 0.87\n (d) \u03c1 = 0.96\nFigure 3.5: 2D histograms displaying the correlation between the same score, from different\nensemble configurations. Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. 
Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n", "original_text": "In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. "}, "hash": "5879915ef616fe841d0bd5d2a2f4b1a2db46afa7300221adeed064e1b9c5632e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9edfebce-cda2-44e7-bf93-22f1471af47f", "node_type": "1", "metadata": {"window": "26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. ", "original_text": "Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. "}, "hash": "a948e99b6aae1535c32fd68f7605db527526a8d13b904e605647e46f1599df88", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Three of these images are shared between subsets.\n", "mimetype": "text/plain", "start_char_idx": 53016, "end_char_idx": 53066, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9edfebce-cda2-44e7-bf93-22f1471af47f": {"__data__": {"id_": "9edfebce-cda2-44e7-bf93-22f1471af47f", "embedding": null, "metadata": {"window": "26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. 
Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. ", "original_text": "Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c5589445-716f-48e7-b5f4-e50f81a8b5e8", "node_type": "1", "metadata": {"window": "Spearman\u2019s rank correlation coefficient\u03c1 is given.\n 26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. ", "original_text": "Three of these images are shared between subsets.\n"}, "hash": "a4e28421fef963894a3819b7827c5fe14936dbb16d4ec4a42337205285717724", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "97aa53d8-3958-4d51-9b01-3aec6eb1b4a1", "node_type": "1", "metadata": {"window": "Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. 
In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. ", "original_text": "In Fig. "}, "hash": "c7fb5f854726ab102754e2bc58293ff1f7135b828cc1434be4cbebb868f7fed6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. ", "mimetype": "text/plain", "start_char_idx": 53066, "end_char_idx": 53184, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "97aa53d8-3958-4d51-9b01-3aec6eb1b4a1": {"__data__": {"id_": "97aa53d8-3958-4d51-9b01-3aec6eb1b4a1", "embedding": null, "metadata": {"window": "Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. ", "original_text": "In Fig. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9edfebce-cda2-44e7-bf93-22f1471af47f", "node_type": "1", "metadata": {"window": "26\n\nTable 3.3: Intersection over Union (IoU) between sets selected with different ensemble config-\nurations and three acquisition functions: entropy H, mutual information J, and random\n5best 20last 5ckpt single /\nH J H J H J H random\n5best H 1 0.37 0.56 0.29 0.53 0.27 0.49 0.06\nJ 1 0.30 0.38 0.39 0.34 0.26 0.06\n20last H 1 0.32 0.83 0.29 0.66 0.06\nJ 1 0.29 0.57 0.26 0.06\n5ckpt H 1 0.27 0.7 0.06\nJ 1 0.24 0.06\nsingle H 1 0.06\n/ random 1\n3.4.3. Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. 
In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. ", "original_text": "Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. "}, "hash": "a948e99b6aae1535c32fd68f7605db527526a8d13b904e605647e46f1599df88", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "438f8dc5-53df-49e4-9ee8-b1ef9b6a129f", "node_type": "1", "metadata": {"window": "In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. ", "original_text": "3.8, the top five images in the entropy ranking are shown.\n"}, "hash": "9f3a17f1a23d612c8b768951cb648baac4ac3fdb4b63527df832a400d630ce9e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In Fig. ", "mimetype": "text/plain", "start_char_idx": 53184, "end_char_idx": 53192, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "438f8dc5-53df-49e4-9ee8-b1ef9b6a129f": {"__data__": {"id_": "438f8dc5-53df-49e4-9ee8-b1ef9b6a129f", "embedding": null, "metadata": {"window": "In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. 
", "original_text": "3.8, the top five images in the entropy ranking are shown.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "97aa53d8-3958-4d51-9b01-3aec6eb1b4a1", "node_type": "1", "metadata": {"window": "Visualization of ranked images\nThe visualization of top and bottom images in the score rankings is another insightful point.\n In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. ", "original_text": "In Fig. "}, "hash": "c7fb5f854726ab102754e2bc58293ff1f7135b828cc1434be4cbebb868f7fed6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e1252243-36f1-48a2-82b5-2f450c0eca67", "node_type": "1", "metadata": {"window": "Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n", "original_text": "These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. "}, "hash": "7e7aab0b7b90cd579df30cc88a9c1f19bfe0d0f362c4ca200d708ded86edffb4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "3.8, the top five images in the entropy ranking are shown.\n", "mimetype": "text/plain", "start_char_idx": 53192, "end_char_idx": 53251, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e1252243-36f1-48a2-82b5-2f450c0eca67": {"__data__": {"id_": "e1252243-36f1-48a2-82b5-2f450c0eca67", "embedding": null, "metadata": {"window": "Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 
3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n", "original_text": "These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "438f8dc5-53df-49e4-9ee8-b1ef9b6a129f", "node_type": "1", "metadata": {"window": "In Fig.3.6, the bottom five images in U according to the entropy and mutual information rank-\nings for the 5best ensemble are displayed. Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. ", "original_text": "3.8, the top five images in the entropy ranking are shown.\n"}, "hash": "9f3a17f1a23d612c8b768951cb648baac4ac3fdb4b63527df832a400d630ce9e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "65c2052a-2aca-462e-a47c-79262bbc21c6", "node_type": "1", "metadata": {"window": "Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. 
Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. ", "original_text": "In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. "}, "hash": "62ec895b0eb7fabcd236b391005409288c34c5495ec26ae8966aa5f56629e455", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. ", "mimetype": "text/plain", "start_char_idx": 53251, "end_char_idx": 53394, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "65c2052a-2aca-462e-a47c-79262bbc21c6": {"__data__": {"id_": "65c2052a-2aca-462e-a47c-79262bbc21c6", "embedding": null, "metadata": {"window": "Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. ", "original_text": "In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e1252243-36f1-48a2-82b5-2f450c0eca67", "node_type": "1", "metadata": {"window": "Three of these images are shared between subsets.\n Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. 
Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n", "original_text": "These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. "}, "hash": "7e7aab0b7b90cd579df30cc88a9c1f19bfe0d0f362c4ca200d708ded86edffb4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a540e60e-c64b-4a8d-a490-41870f386ce6", "node_type": "1", "metadata": {"window": "In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n", "original_text": "The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. "}, "hash": "33fc36fed362a6a9371e8b2676bf530cac28cf7c3c60a94635b4a9798db88d65", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. ", "mimetype": "text/plain", "start_char_idx": 53394, "end_char_idx": 53551, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a540e60e-c64b-4a8d-a490-41870f386ce6": {"__data__": {"id_": "a540e60e-c64b-4a8d-a490-41870f386ce6", "embedding": null, "metadata": {"window": "In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. 
Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n", "original_text": "The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "65c2052a-2aca-462e-a47c-79262bbc21c6", "node_type": "1", "metadata": {"window": "Interestingly, the model seems to easily classify images where the same species of moths is\npresented multiple times. In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. ", "original_text": "In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. "}, "hash": "62ec895b0eb7fabcd236b391005409288c34c5495ec26ae8966aa5f56629e455", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a6a97211-221b-49c5-88b6-2c01f313ca02", "node_type": "1", "metadata": {"window": "3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. 
", "original_text": "Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. "}, "hash": "3cd4601de016eb12b9ccba3e625d1a107f730edc756a23ea36bdc0199dace125", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. ", "mimetype": "text/plain", "start_char_idx": 53551, "end_char_idx": 53648, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a6a97211-221b-49c5-88b6-2c01f313ca02": {"__data__": {"id_": "a6a97211-221b-49c5-88b6-2c01f313ca02", "embedding": null, "metadata": {"window": "3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. ", "original_text": "Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a540e60e-c64b-4a8d-a490-41870f386ce6", "node_type": "1", "metadata": {"window": "In Fig. 3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. 
Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n", "original_text": "The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. "}, "hash": "33fc36fed362a6a9371e8b2676bf530cac28cf7c3c60a94635b4a9798db88d65", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "808d1512-1f22-4294-954d-15dce378b092", "node_type": "1", "metadata": {"window": "These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n", "original_text": "Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n"}, "hash": "4b910735ca73e4b431e6052dcefee32d700a647fd31b91066697494b122db290", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. ", "mimetype": "text/plain", "start_char_idx": 53648, "end_char_idx": 53833, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "808d1512-1f22-4294-954d-15dce378b092": {"__data__": {"id_": "808d1512-1f22-4294-954d-15dce378b092", "embedding": null, "metadata": {"window": "These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. 
Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n", "original_text": "Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a6a97211-221b-49c5-88b6-2c01f313ca02", "node_type": "1", "metadata": {"window": "3.8, the top five images in the entropy ranking are shown.\n These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. ", "original_text": "Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. 
"}, "hash": "3cd4601de016eb12b9ccba3e625d1a107f730edc756a23ea36bdc0199dace125", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "aba2add0-9fc9-4cca-914e-7a03af84e852", "node_type": "1", "metadata": {"window": "In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. ", "original_text": "Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. "}, "hash": "31d1aa4452c37394a14e40135e6dc601ff2e0b7dbd50fa20878fd7af0183da02", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n", "mimetype": "text/plain", "start_char_idx": 53833, "end_char_idx": 53922, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "aba2add0-9fc9-4cca-914e-7a03af84e852": {"__data__": {"id_": "aba2add0-9fc9-4cca-914e-7a03af84e852", "embedding": null, "metadata": {"window": "In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. 
Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. ", "original_text": "Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "808d1512-1f22-4294-954d-15dce378b092", "node_type": "1", "metadata": {"window": "These images raise an alarm, as they have nothing to do in a training set for a classifier to be\nused on trap images such as those in Fig.3.2. In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n", "original_text": "Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n"}, "hash": "4b910735ca73e4b431e6052dcefee32d700a647fd31b91066697494b122db290", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3ed751a7-875c-4061-a51e-39ed7a1a63b4", "node_type": "1", "metadata": {"window": "The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. 
Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n", "original_text": "Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n"}, "hash": "440200f51663c7235ac0798bbddce43edf684cd7ed51a1fb0939297491ab3ade", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. ", "mimetype": "text/plain", "start_char_idx": 53922, "end_char_idx": 54135, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3ed751a7-875c-4061-a51e-39ed7a1a63b4": {"__data__": {"id_": "3ed751a7-875c-4061-a51e-39ed7a1a63b4", "embedding": null, "metadata": {"window": "The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. 
Some images appear in both bottom fives.\n", "original_text": "Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "aba2add0-9fc9-4cca-914e-7a03af84e852", "node_type": "1", "metadata": {"window": "In fact, they either don\u2019t display a live moth at\nall, or the moth occupies a minimal portion of the image, on top of being quite blended in the\nbackground. The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. ", "original_text": "Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. "}, "hash": "31d1aa4452c37394a14e40135e6dc601ff2e0b7dbd50fa20878fd7af0183da02", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c902ecc8-41f1-4a31-ad05-0de98e694a77", "node_type": "1", "metadata": {"window": "Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. 
Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. ", "original_text": "In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. "}, "hash": "f4416b101c20cfbc1af7b252bce34953d3a6d35893ae28db1be35faf8f823140", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n", "mimetype": "text/plain", "start_char_idx": 54135, "end_char_idx": 54230, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c902ecc8-41f1-4a31-ad05-0de98e694a77": {"__data__": {"id_": "c902ecc8-41f1-4a31-ad05-0de98e694a77", "embedding": null, "metadata": {"window": "Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. ", "original_text": "In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3ed751a7-875c-4061-a51e-39ed7a1a63b4", "node_type": "1", "metadata": {"window": "The previous are impossible to classify, in other words, they have a high aleatoric\nuncertainty. Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n", "original_text": "Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n"}, "hash": "440200f51663c7235ac0798bbddce43edf684cd7ed51a1fb0939297491ab3ade", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "066a6326-763f-4494-a458-53350b36806f", "node_type": "1", "metadata": {"window": "Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. 
The entropy score for\neach image is given.\n", "original_text": "Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n"}, "hash": "5946b43a48df1643bbf47655dcee5a598c081d72ad360f9268636baafc1911f9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. ", "mimetype": "text/plain", "start_char_idx": 54230, "end_char_idx": 54492, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "066a6326-763f-4494-a458-53350b36806f": {"__data__": {"id_": "066a6326-763f-4494-a458-53350b36806f", "embedding": null, "metadata": {"window": "Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n", "original_text": "Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c902ecc8-41f1-4a31-ad05-0de98e694a77", "node_type": "1", "metadata": {"window": "Hence, it makes sense that the entropy score is high (and, by the way, the least con-\nfidence and margin sampling scores as well), while the mutual information score is relatively\nlow. Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. 
Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. ", "original_text": "In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. "}, "hash": "f4416b101c20cfbc1af7b252bce34953d3a6d35893ae28db1be35faf8f823140", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "934bf8f0-f438-4364-8583-f6f3bd4bc27c", "node_type": "1", "metadata": {"window": "Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. ", "original_text": "27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. 
"}, "hash": "675b066129a91062e30b075a8adffa31a75cb7f382d71cf5d8bb15078bae430b", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n", "mimetype": "text/plain", "start_char_idx": 54492, "end_char_idx": 54707, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "934bf8f0-f438-4364-8583-f6f3bd4bc27c": {"__data__": {"id_": "934bf8f0-f438-4364-8583-f6f3bd4bc27c", "embedding": null, "metadata": {"window": "Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. ", "original_text": "27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "066a6326-763f-4494-a458-53350b36806f", "node_type": "1", "metadata": {"window": "Finally, the top five images in the mutual information ranking are presented in Fig.3.7.\n Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. 
Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n", "original_text": "Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n"}, "hash": "5946b43a48df1643bbf47655dcee5a598c081d72ad360f9268636baafc1911f9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0470d06d-df1d-4c0f-865b-64b7c566502b", "node_type": "1", "metadata": {"window": "Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n", "original_text": "Some images appear in both bottom fives.\n"}, "hash": "69fcb4f98688940116470e8e6e869943a6e613fc32983e4eecf057306d828973", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. ", "mimetype": "text/plain", "start_char_idx": 54707, "end_char_idx": 54863, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0470d06d-df1d-4c0f-865b-64b7c566502b": {"__data__": {"id_": "0470d06d-df1d-4c0f-865b-64b7c566502b", "embedding": null, "metadata": {"window": "Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. 
Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n", "original_text": "Some images appear in both bottom fives.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "934bf8f0-f438-4364-8583-f6f3bd4bc27c", "node_type": "1", "metadata": {"window": "Here, with one exception (which has the highest entropy), the moths are well distinguishable\nfrom the background, but again they only occupy a very small portion of the image, which is\nchallenging for the models. Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. ", "original_text": "27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. "}, "hash": "675b066129a91062e30b075a8adffa31a75cb7f382d71cf5d8bb15078bae430b", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8f59cd69-4b89-4d65-a5f9-c158df2b2562", "node_type": "1", "metadata": {"window": "In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. 
Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. ", "original_text": "4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. "}, "hash": "eeb2e3a9ff08fcb8997be16ef5418c6b655a3ba4502051a501e103cc3cdc65ce", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Some images appear in both bottom fives.\n", "mimetype": "text/plain", "start_char_idx": 54863, "end_char_idx": 54904, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8f59cd69-4b89-4d65-a5f9-c158df2b2562": {"__data__": {"id_": "8f59cd69-4b89-4d65-a5f9-c158df2b2562", "embedding": null, "metadata": {"window": "In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. ", "original_text": "4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0470d06d-df1d-4c0f-865b-64b7c566502b", "node_type": "1", "metadata": {"window": "Again, these images don\u2019t seem helpful at all for a model to be\nused on the images in Fig.3.2.\n In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n", "original_text": "Some images appear in both bottom fives.\n"}, "hash": "69fcb4f98688940116470e8e6e869943a6e613fc32983e4eecf057306d828973", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e9c5f531-af7b-4a41-b0a8-f13cf8ed4034", "node_type": "1", "metadata": {"window": "Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. ", "original_text": "The entropy score for\neach image is given.\n"}, "hash": "13c996212621f47e0f6044a2514f74332c0a5616cdc875057ce2c831eff6d5d4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. 
", "mimetype": "text/plain", "start_char_idx": 54904, "end_char_idx": 55001, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e9c5f531-af7b-4a41-b0a8-f13cf8ed4034": {"__data__": {"id_": "e9c5f531-af7b-4a41-b0a8-f13cf8ed4034", "embedding": null, "metadata": {"window": "Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. ", "original_text": "The entropy score for\neach image is given.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8f59cd69-4b89-4d65-a5f9-c158df2b2562", "node_type": "1", "metadata": {"window": "In light of these findings, when applied to the GBIF dataset, it is more likely that the uncertainty-\nbased active learning techniques might serve a different purpose than expected: to filter out\nimages that are too difficult and different from the trap images. Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. ", "original_text": "4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. "}, "hash": "eeb2e3a9ff08fcb8997be16ef5418c6b655a3ba4502051a501e103cc3cdc65ce", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "38896ebb-e8a5-48c5-95a8-ec0c73e4e43e", "node_type": "1", "metadata": {"window": "27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. 
Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n", "original_text": "28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. "}, "hash": "cf851195edb1b984b6feea7356f6df6b1be1f4b8170cf5bcdbad806c00e8bc74", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The entropy score for\neach image is given.\n", "mimetype": "text/plain", "start_char_idx": 55001, "end_char_idx": 55044, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "38896ebb-e8a5-48c5-95a8-ec0c73e4e43e": {"__data__": {"id_": "38896ebb-e8a5-48c5-95a8-ec0c73e4e43e", "embedding": null, "metadata": {"window": "27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n", "original_text": "28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e9c5f531-af7b-4a41-b0a8-f13cf8ed4034", "node_type": "1", "metadata": {"window": "Tests to see if the selected im-\nages would indeed hinder the model\u2019s learning \u2014by completing the first iteration of the active\nlearning evaluation\u2014 could not be completed in time, and are left for future research.\n 27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. 
The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. ", "original_text": "The entropy score for\neach image is given.\n"}, "hash": "13c996212621f47e0f6044a2514f74332c0a5616cdc875057ce2c831eff6d5d4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "98785b88-b462-4f3d-9689-13c8524b4738", "node_type": "1", "metadata": {"window": "Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. ", "original_text": "The mutual information score of\neach image is given.\n"}, "hash": "bfec69013c5d078c4e29d5a31574378acda30d9b964af12f0115e28cd32b2576", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. ", "mimetype": "text/plain", "start_char_idx": 55044, "end_char_idx": 55139, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "98785b88-b462-4f3d-9689-13c8524b4738": {"__data__": {"id_": "98785b88-b462-4f3d-9689-13c8524b4738", "embedding": null, "metadata": {"window": "Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. ", "original_text": "The mutual information score of\neach image is given.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "38896ebb-e8a5-48c5-95a8-ec0c73e4e43e", "node_type": "1", "metadata": {"window": "27\n\nJ\n J\n J and H\n H\nH\n J and H\n J and H\nFigure 3.6: Bottom five images in the entropy ( H) and mutual information ( J) rankings, using\nthe 5best ensemble. 
Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n", "original_text": "28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. "}, "hash": "cf851195edb1b984b6feea7356f6df6b1be1f4b8170cf5bcdbad806c00e8bc74", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bb8c9d31-dd2e-4f50-8295-467ed70ae70a", "node_type": "1", "metadata": {"window": "4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. ", "original_text": "29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. "}, "hash": "b1353069f94b8dfa3c840b0b30607efc9945bac7edbf5f1482f2df523f9f788d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The mutual information score of\neach image is given.\n", "mimetype": "text/plain", "start_char_idx": 55139, "end_char_idx": 55192, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bb8c9d31-dd2e-4f50-8295-467ed70ae70a": {"__data__": {"id_": "bb8c9d31-dd2e-4f50-8295-467ed70ae70a", "embedding": null, "metadata": {"window": "4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. ", "original_text": "29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "98785b88-b462-4f3d-9689-13c8524b4738", "node_type": "1", "metadata": {"window": "Some images appear in both bottom fives.\n 4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. ", "original_text": "The mutual information score of\neach image is given.\n"}, "hash": "bfec69013c5d078c4e29d5a31574378acda30d9b964af12f0115e28cd32b2576", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7e95e5e0-aed3-4fc4-909d-b17407e921b8", "node_type": "1", "metadata": {"window": "The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. ", "original_text": "First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. "}, "hash": "ba18f74a39f9788a64c7538292911fb0e0c2cc6c8742aed0e0d4b783c2355ec0", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. ", "mimetype": "text/plain", "start_char_idx": 55192, "end_char_idx": 55293, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7e95e5e0-aed3-4fc4-909d-b17407e921b8": {"__data__": {"id_": "7e95e5e0-aed3-4fc4-909d-b17407e921b8", "embedding": null, "metadata": {"window": "The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. 
Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. ", "original_text": "First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bb8c9d31-dd2e-4f50-8295-467ed70ae70a", "node_type": "1", "metadata": {"window": "4.9\n 5.6\n 5.2\n 4.3\n 4.6\nFigure 3.7: Top five images in the mutual information ranking with5best. The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. ", "original_text": "29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. "}, "hash": "b1353069f94b8dfa3c840b0b30607efc9945bac7edbf5f1482f2df523f9f788d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e63b6c3c-b796-45c5-a3cc-c297ee6ebe19", "node_type": "1", "metadata": {"window": "28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. ", "original_text": "Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n"}, "hash": "475f5162211a65e002a737eb116383088f917d019fd94808ad7f04967f6657e5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. 
", "mimetype": "text/plain", "start_char_idx": 55293, "end_char_idx": 55398, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e63b6c3c-b796-45c5-a3cc-c297ee6ebe19": {"__data__": {"id_": "e63b6c3c-b796-45c5-a3cc-c297ee6ebe19", "embedding": null, "metadata": {"window": "28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. ", "original_text": "Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7e95e5e0-aed3-4fc4-909d-b17407e921b8", "node_type": "1", "metadata": {"window": "The entropy score for\neach image is given.\n 28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. ", "original_text": "First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. "}, "hash": "ba18f74a39f9788a64c7538292911fb0e0c2cc6c8742aed0e0d4b783c2355ec0", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e0975843-1213-4bb8-b0f7-89433f6505f8", "node_type": "1", "metadata": {"window": "The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. 
In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. ", "original_text": "Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. "}, "hash": "d4b029ccc01060d9a3cd939ceb3e02ec6cea23fe4072927ad79037498f486643", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n", "mimetype": "text/plain", "start_char_idx": 55398, "end_char_idx": 55496, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e0975843-1213-4bb8-b0f7-89433f6505f8": {"__data__": {"id_": "e0975843-1213-4bb8-b0f7-89433f6505f8", "embedding": null, "metadata": {"window": "The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. ", "original_text": "Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e63b6c3c-b796-45c5-a3cc-c297ee6ebe19", "node_type": "1", "metadata": {"window": "28\n\n0.38\n 0.45\n 0.48\n0.66\n 0.39\nFigure 3.8: Top five images in the entropy ranking with 5best. The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. 
", "original_text": "Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n"}, "hash": "475f5162211a65e002a737eb116383088f917d019fd94808ad7f04967f6657e5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bea604cd-e1e7-4cfd-a819-e8b4c124fa80", "node_type": "1", "metadata": {"window": "29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. ", "original_text": "While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. "}, "hash": "0fdcc99525117298d8d7d80c22a481e8921632b7d7521642526f7ca2629ebe57", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. ", "mimetype": "text/plain", "start_char_idx": 55496, "end_char_idx": 55601, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bea604cd-e1e7-4cfd-a819-e8b4c124fa80": {"__data__": {"id_": "bea604cd-e1e7-4cfd-a819-e8b4c124fa80", "embedding": null, "metadata": {"window": "29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. ", "original_text": "While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e0975843-1213-4bb8-b0f7-89433f6505f8", "node_type": "1", "metadata": {"window": "The mutual information score of\neach image is given.\n 29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. ", "original_text": "Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. "}, "hash": "d4b029ccc01060d9a3cd939ceb3e02ec6cea23fe4072927ad79037498f486643", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9165083b-e56a-434c-9346-206b247c42ff", "node_type": "1", "metadata": {"window": "First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. ", "original_text": "In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. "}, "hash": "ed6f130f4e579619a8ddb9bfe5acc7cb2751f55840f9b74fb9dacd3ecf61c350", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. 
", "mimetype": "text/plain", "start_char_idx": 55601, "end_char_idx": 55711, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9165083b-e56a-434c-9346-206b247c42ff": {"__data__": {"id_": "9165083b-e56a-434c-9346-206b247c42ff", "embedding": null, "metadata": {"window": "First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. ", "original_text": "In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bea604cd-e1e7-4cfd-a819-e8b4c124fa80", "node_type": "1", "metadata": {"window": "29\n\nChapter 4\nConclusion\nThe main contributions from the work presented in this report are two-fold. First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. ", "original_text": "While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. "}, "hash": "0fdcc99525117298d8d7d80c22a481e8921632b7d7521642526f7ca2629ebe57", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "db0abb44-02b1-4900-9cea-30cb8a34eb30", "node_type": "1", "metadata": {"window": "Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. 
In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. ", "original_text": "The first was the lack of testing data. "}, "hash": "b5c93ce26f3d63ac58b5be1c55c1a765ea226af322a92253ce55dbbe24bb660f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. ", "mimetype": "text/plain", "start_char_idx": 55711, "end_char_idx": 55810, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "db0abb44-02b1-4900-9cea-30cb8a34eb30": {"__data__": {"id_": "db0abb44-02b1-4900-9cea-30cb8a34eb30", "embedding": null, "metadata": {"window": "Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. ", "original_text": "The first was the lack of testing data. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9165083b-e56a-434c-9346-206b247c42ff", "node_type": "1", "metadata": {"window": "First, the object\ndetection module has been significantly improved, both in terms of speed and accuracy. Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. 
While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. ", "original_text": "In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. "}, "hash": "ed6f130f4e579619a8ddb9bfe5acc7cb2751f55840f9b74fb9dacd3ecf61c350", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6ad3bf0f-bcab-4da1-af98-0656886f010f", "node_type": "1", "metadata": {"window": "Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n", "original_text": "While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. "}, "hash": "f78cd96ecb472a2531c378af525530ba43b7b1339025cfae93148a912b538ad7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The first was the lack of testing data. ", "mimetype": "text/plain", "start_char_idx": 55810, "end_char_idx": 55850, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6ad3bf0f-bcab-4da1-af98-0656886f010f": {"__data__": {"id_": "6ad3bf0f-bcab-4da1-af98-0656886f010f", "embedding": null, "metadata": {"window": "Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. 
Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n", "original_text": "While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "db0abb44-02b1-4900-9cea-30cb8a34eb30", "node_type": "1", "metadata": {"window": "Ad-\nditionally, good foundations have been laid to facilitate further development of the project.\n Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. ", "original_text": "The first was the lack of testing data. "}, "hash": "b5c93ce26f3d63ac58b5be1c55c1a765ea226af322a92253ce55dbbe24bb660f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "50c532d3-384e-4328-9afc-5dcd8cf84de1", "node_type": "1", "metadata": {"window": "While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. 
Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. ", "original_text": "In order to reach a signif-\nicant scale, a coordinated effort is needed. "}, "hash": "f524a92ed7d5c0d060d30d81b319eb441f306758b6170402fee2e57b65ea6b71", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. ", "mimetype": "text/plain", "start_char_idx": 55850, "end_char_idx": 56037, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "50c532d3-384e-4328-9afc-5dcd8cf84de1": {"__data__": {"id_": "50c532d3-384e-4328-9afc-5dcd8cf84de1", "embedding": null, "metadata": {"window": "While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. ", "original_text": "In order to reach a signif-\nicant scale, a coordinated effort is needed. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6ad3bf0f-bcab-4da1-af98-0656886f010f", "node_type": "1", "metadata": {"window": "Second, uncertainty-based active learning techniques for the moth species classifier have been\nexplored. While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. 
In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n", "original_text": "While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. "}, "hash": "f78cd96ecb472a2531c378af525530ba43b7b1339025cfae93148a912b538ad7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "76785645-0ba2-4bdf-8141-ecfa99b51dc3", "node_type": "1", "metadata": {"window": "In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n", "original_text": "Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. "}, "hash": "a4a719f5340d492aac09d015aa8a06619baf9cfac8b055a538cd5e43a76b0c28", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In order to reach a signif-\nicant scale, a coordinated effort is needed. 
", "mimetype": "text/plain", "start_char_idx": 56037, "end_char_idx": 56110, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "76785645-0ba2-4bdf-8141-ecfa99b51dc3": {"__data__": {"id_": "76785645-0ba2-4bdf-8141-ecfa99b51dc3", "embedding": null, "metadata": {"window": "In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n", "original_text": "Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "50c532d3-384e-4328-9afc-5dcd8cf84de1", "node_type": "1", "metadata": {"window": "While this have been insightful, further research is needed to integrate active learn-\ning into the pipeline. In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. 
Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. ", "original_text": "In order to reach a signif-\nicant scale, a coordinated effort is needed. "}, "hash": "f524a92ed7d5c0d060d30d81b319eb441f306758b6170402fee2e57b65ea6b71", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "55be0b16-6a7a-424d-a068-f34d04f82894", "node_type": "1", "metadata": {"window": "The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. ", "original_text": "The second factor has been the lack of effective collaboration inside the\nteam. "}, "hash": "8644b1a9e52765a6fd9e92f0e347a3e4d1672694e3625c40d4011f9c4f14a8f7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. ", "mimetype": "text/plain", "start_char_idx": 56110, "end_char_idx": 56274, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "55be0b16-6a7a-424d-a068-f34d04f82894": {"__data__": {"id_": "55be0b16-6a7a-424d-a068-f34d04f82894", "embedding": null, "metadata": {"window": "The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. 
Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. ", "original_text": "The second factor has been the lack of effective collaboration inside the\nteam. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "76785645-0ba2-4bdf-8141-ecfa99b51dc3", "node_type": "1", "metadata": {"window": "In conclusion, it is worth mentioning the two main factors that have\nnegatively affected progress. The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n", "original_text": "Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. "}, "hash": "a4a719f5340d492aac09d015aa8a06619baf9cfac8b055a538cd5e43a76b0c28", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e2afaa55-5537-4763-a075-920b698f586a", "node_type": "1", "metadata": {"window": "While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. 
Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. ", "original_text": "Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n"}, "hash": "62e4c3bc3459c8ce19ac105d76c148ba583d6af98bb4e9788e40a89648f95b2d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The second factor has been the lack of effective collaboration inside the\nteam. ", "mimetype": "text/plain", "start_char_idx": 56274, "end_char_idx": 56354, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e2afaa55-5537-4763-a075-920b698f586a": {"__data__": {"id_": "e2afaa55-5537-4763-a075-920b698f586a", "embedding": null, "metadata": {"window": "While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. 
", "original_text": "Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "55be0b16-6a7a-424d-a068-f34d04f82894", "node_type": "1", "metadata": {"window": "The first was the lack of testing data. While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. ", "original_text": "The second factor has been the lack of effective collaboration inside the\nteam. "}, "hash": "8644b1a9e52765a6fd9e92f0e347a3e4d1672694e3625c40d4011f9c4f14a8f7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c3663e02-759a-4398-a62f-06e0bb4b213b", "node_type": "1", "metadata": {"window": "In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. 
Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n", "original_text": "30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. "}, "hash": "b40454f9c16d3862e9967fd49c8e745bd0cdfa54a45f7d9a33c388f317924d2a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n", "mimetype": "text/plain", "start_char_idx": 56354, "end_char_idx": 56692, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c3663e02-759a-4398-a62f-06e0bb4b213b": {"__data__": {"id_": "c3663e02-759a-4398-a62f-06e0bb4b213b", "embedding": null, "metadata": {"window": "In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n", "original_text": "30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e2afaa55-5537-4763-a075-920b698f586a", "node_type": "1", "metadata": {"window": "While tricks can some-\ntimes be applied to efficiently create training data \u2014as was done in this project\u2014, there is no\ngetting around the need for manual labor to build testing datasets. In order to reach a signif-\nicant scale, a coordinated effort is needed. 
Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. ", "original_text": "Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n"}, "hash": "62e4c3bc3459c8ce19ac105d76c148ba583d6af98bb4e9788e40a89648f95b2d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4b39663f-c96f-4aec-9e40-a291061d8137", "node_type": "1", "metadata": {"window": "Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. ", "original_text": "New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n"}, "hash": "134143563f23012f8bbe1ccc6f4a149fa273818ba3e69d043e52024e52359461", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. 
", "mimetype": "text/plain", "start_char_idx": 56692, "end_char_idx": 56905, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4b39663f-c96f-4aec-9e40-a291061d8137": {"__data__": {"id_": "4b39663f-c96f-4aec-9e40-a291061d8137", "embedding": null, "metadata": {"window": "Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. ", "original_text": "New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c3663e02-759a-4398-a62f-06e0bb4b213b", "node_type": "1", "metadata": {"window": "In order to reach a signif-\nicant scale, a coordinated effort is needed. Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n", "original_text": "30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. 
Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. "}, "hash": "b40454f9c16d3862e9967fd49c8e745bd0cdfa54a45f7d9a33c388f317924d2a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e0a97f98-df2a-43a3-a15a-243ec97e6d21", "node_type": "1", "metadata": {"window": "The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n", "original_text": "[2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. "}, "hash": "d86f7e8738ef700e63ef713f1f5e717ee89d8b87b70e259a67860a2110303ed9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n", "mimetype": "text/plain", "start_char_idx": 56905, "end_char_idx": 57040, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e0a97f98-df2a-43a3-a15a-243ec97e6d21": {"__data__": {"id_": "e0a97f98-df2a-43a3-a15a-243ec97e6d21", "embedding": null, "metadata": {"window": "The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. 
Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n", "original_text": "[2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4b39663f-c96f-4aec-9e40-a291061d8137", "node_type": "1", "metadata": {"window": "Fortunately, such an effort is currently under way\nfrom collaborators both at Mila and outside, and testing datasets will become available in the\nfollowing months. The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. ", "original_text": "New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n"}, "hash": "134143563f23012f8bbe1ccc6f4a149fa273818ba3e69d043e52024e52359461", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "42563b7f-cbcd-4519-82d7-ca67adad7b27", "node_type": "1", "metadata": {"window": "Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. 
Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. ", "original_text": "Assessing the causes of late pleistocene extinctions on the continents. "}, "hash": "7fe18813d9afbda7d13989dcdfd42dbb37265d7d79b112e6d6d0b7289ef7a8eb", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. ", "mimetype": "text/plain", "start_char_idx": 57040, "end_char_idx": 57135, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "42563b7f-cbcd-4519-82d7-ca67adad7b27": {"__data__": {"id_": "42563b7f-cbcd-4519-82d7-ca67adad7b27", "embedding": null, "metadata": {"window": "Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. ", "original_text": "Assessing the causes of late pleistocene extinctions on the continents. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e0a97f98-df2a-43a3-a15a-243ec97e6d21", "node_type": "1", "metadata": {"window": "The second factor has been the lack of effective collaboration inside the\nteam. Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. 
Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n", "original_text": "[2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. "}, "hash": "d86f7e8738ef700e63ef713f1f5e717ee89d8b87b70e259a67860a2110303ed9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2bcf57ea-ff46-42d1-ad9d-99deacf38369", "node_type": "1", "metadata": {"window": "30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n", "original_text": "Science,\n306(5693):70\u201375, October 2004.\n"}, "hash": "031507077d1ebe8c5010c208e8ed2b4ab691414704c0798d08bf414baaa44843", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Assessing the causes of late pleistocene extinctions on the continents. ", "mimetype": "text/plain", "start_char_idx": 57135, "end_char_idx": 57207, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2bcf57ea-ff46-42d1-ad9d-99deacf38369": {"__data__": {"id_": "2bcf57ea-ff46-42d1-ad9d-99deacf38369", "embedding": null, "metadata": {"window": "30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. 
Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n", "original_text": "Science,\n306(5693):70\u201375, October 2004.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "42563b7f-cbcd-4519-82d7-ca67adad7b27", "node_type": "1", "metadata": {"window": "Considering that the goal \u2014to develop a product\u2014 is akin to the one of a start-up, and\nas collaboration is naturally more challenging when colleagues work remotely from different\nregions of the world, I believe that the team would greatly benefit from the adoption of project\ndevelopment best practices that are standard in the industry.\n 30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. ", "original_text": "Assessing the causes of late pleistocene extinctions on the continents. "}, "hash": "7fe18813d9afbda7d13989dcdfd42dbb37265d7d79b112e6d6d0b7289ef7a8eb", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "10e9d4ea-46ce-484d-949d-0b8ba30a7c43", "node_type": "1", "metadata": {"window": "New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. ", "original_text": "[3] Luciano Prates and S. Ivan Perez. 
"}, "hash": "582c064369b9c7858eb698a69db351ff5d68ab4e86b086767fd514c95e6e95c8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Science,\n306(5693):70\u201375, October 2004.\n", "mimetype": "text/plain", "start_char_idx": 57207, "end_char_idx": 57247, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "10e9d4ea-46ce-484d-949d-0b8ba30a7c43": {"__data__": {"id_": "10e9d4ea-46ce-484d-949d-0b8ba30a7c43", "embedding": null, "metadata": {"window": "New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. ", "original_text": "[3] Luciano Prates and S. Ivan Perez. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2bcf57ea-ff46-42d1-ad9d-99deacf38369", "node_type": "1", "metadata": {"window": "30\n\nBibliography\n[1] Richard G. Roberts, Timothy F. Flannery, Linda K. Ayliffe, Hiroyuki Yoshida, Jon M. Olley,\nGavin J. Prideaux, Geoff M. Laslett, Alexander Baynes, M. A. Smith, Rhys Jones, and\nBarton L. Smith. New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n", "original_text": "Science,\n306(5693):70\u201375, October 2004.\n"}, "hash": "031507077d1ebe8c5010c208e8ed2b4ab691414704c0798d08bf414baaa44843", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fad2e5e6-9a2c-4936-96aa-85e293c2ded1", "node_type": "1", "metadata": {"window": "[2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. 
Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. ", "original_text": "Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n"}, "hash": "972d3c08dddd008bdb7864542fb1b8d33bd5427ffa524fbacc2e95d894ece198", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[3] Luciano Prates and S. Ivan Perez. ", "mimetype": "text/plain", "start_char_idx": 57247, "end_char_idx": 57285, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fad2e5e6-9a2c-4936-96aa-85e293c2ded1": {"__data__": {"id_": "fad2e5e6-9a2c-4936-96aa-85e293c2ded1", "embedding": null, "metadata": {"window": "[2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. ", "original_text": "Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "10e9d4ea-46ce-484d-949d-0b8ba30a7c43", "node_type": "1", "metadata": {"window": "New ages for the last australian megafauna: Continent-wide extinction\nabout 46, 000 years ago.Science, 292(5523):1888\u20131892, June 2001.\n [2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. 
Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. ", "original_text": "[3] Luciano Prates and S. Ivan Perez. "}, "hash": "582c064369b9c7858eb698a69db351ff5d68ab4e86b086767fd514c95e6e95c8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "81918138-c432-473d-b792-3f072be98160", "node_type": "1", "metadata": {"window": "Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n", "original_text": "[4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. "}, "hash": "cc0d344c3117bd780d9c98ace12185fb2b9c103e577e1e8e439a7b010ac301cd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n", "mimetype": "text/plain", "start_char_idx": 57285, "end_char_idx": 57443, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "81918138-c432-473d-b792-3f072be98160": {"__data__": {"id_": "81918138-c432-473d-b792-3f072be98160", "embedding": null, "metadata": {"window": "Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n", "original_text": "[4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. 
Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fad2e5e6-9a2c-4936-96aa-85e293c2ded1", "node_type": "1", "metadata": {"window": "[2] Anthony D. Barnosky, Paul L. Koch, Robert S. Feranec, Scott L. Wing, and Alan B. Sha-\nbel. Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. ", "original_text": "Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n"}, "hash": "972d3c08dddd008bdb7864542fb1b8d33bd5427ffa524fbacc2e95d894ece198", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2596d581-ed4a-400e-8142-70ce63a64ae7", "node_type": "1", "metadata": {"window": "Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. ", "original_text": "Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n"}, "hash": "34da2b3ee35d2c2590fcd2b9a86468f4e0ad23b9e41d698d04db055aaad72797", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. 
", "mimetype": "text/plain", "start_char_idx": 57443, "end_char_idx": 57596, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2596d581-ed4a-400e-8142-70ce63a64ae7": {"__data__": {"id_": "2596d581-ed4a-400e-8142-70ce63a64ae7", "embedding": null, "metadata": {"window": "Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. ", "original_text": "Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "81918138-c432-473d-b792-3f072be98160", "node_type": "1", "metadata": {"window": "Assessing the causes of late pleistocene extinctions on the continents. Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n", "original_text": "[4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. "}, "hash": "cc0d344c3117bd780d9c98ace12185fb2b9c103e577e1e8e439a7b010ac301cd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "42f0b20d-734a-4f63-866b-b6ecd606d953", "node_type": "1", "metadata": {"window": "[3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. 
Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? ", "original_text": "[5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. "}, "hash": "f64e853b7fcb8a35f93a8d0e15339a4c57ef860ed3f09e2d6b0f9718a5fd5bf5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n", "mimetype": "text/plain", "start_char_idx": 57596, "end_char_idx": 57736, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "42f0b20d-734a-4f63-866b-b6ecd606d953": {"__data__": {"id_": "42f0b20d-734a-4f63-866b-b6ecd606d953", "embedding": null, "metadata": {"window": "[3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? ", "original_text": "[5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2596d581-ed4a-400e-8142-70ce63a64ae7", "node_type": "1", "metadata": {"window": "Science,\n306(5693):70\u201375, October 2004.\n [3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. 
Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. ", "original_text": "Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n"}, "hash": "34da2b3ee35d2c2590fcd2b9a86468f4e0ad23b9e41d698d04db055aaad72797", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "460a4973-530a-436e-9f64-ac43ac154551", "node_type": "1", "metadata": {"window": "Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n", "original_text": "Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. "}, "hash": "59304967a286501d63348ab10116e8ed1a5b27f5d48d066555f43574b32f63fd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. ", "mimetype": "text/plain", "start_char_idx": 57736, "end_char_idx": 57794, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "460a4973-530a-436e-9f64-ac43ac154551": {"__data__": {"id_": "460a4973-530a-436e-9f64-ac43ac154551", "embedding": null, "metadata": {"window": "Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n", "original_text": "Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "42f0b20d-734a-4f63-866b-b6ecd606d953", "node_type": "1", "metadata": {"window": "[3] Luciano Prates and S. Ivan Perez. Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? ", "original_text": "[5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. "}, "hash": "f64e853b7fcb8a35f93a8d0e15339a4c57ef860ed3f09e2d6b0f9718a5fd5bf5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5f534f88-e5ad-49e1-bd89-b799513f4c00", "node_type": "1", "metadata": {"window": "[4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. ", "original_text": "Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n"}, "hash": "f405ac6e7a9e571de2baa511141a5a5581eefdf9bf52232000bf9f472055460e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. ", "mimetype": "text/plain", "start_char_idx": 57794, "end_char_idx": 57911, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5f534f88-e5ad-49e1-bd89-b799513f4c00": {"__data__": {"id_": "5f534f88-e5ad-49e1-bd89-b799513f4c00", "embedding": null, "metadata": {"window": "[4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. 
Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. ", "original_text": "Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "460a4973-530a-436e-9f64-ac43ac154551", "node_type": "1", "metadata": {"window": "Late pleistocene south american megafaunal extinctions\nassociated with rise of fishtail points and human population.Nature Communications, 12(1),\nApril 2021.\n [4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n", "original_text": "Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. "}, "hash": "59304967a286501d63348ab10116e8ed1a5b27f5d48d066555f43574b32f63fd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1e602cba-2b97-4c95-adf8-23e950e42dd9", "node_type": "1", "metadata": {"window": "Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. 
Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n", "original_text": "[6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. "}, "hash": "485cf4ccb3638e139ef026d620f5a5992a2dd14d974c3e6fc88afbed7cec72dc", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n", "mimetype": "text/plain", "start_char_idx": 57911, "end_char_idx": 57989, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1e602cba-2b97-4c95-adf8-23e950e42dd9": {"__data__": {"id_": "1e602cba-2b97-4c95-adf8-23e950e42dd9", "embedding": null, "metadata": {"window": "Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n", "original_text": "[6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5f534f88-e5ad-49e1-bd89-b799513f4c00", "node_type": "1", "metadata": {"window": "[4] Sander van der Kaars, Gifford H. Miller, Chris S. M. Turney, Ellyn J. Cook, Dirk N\u00fcrnberg,\nJoachim Sch\u00f6nfeld, A. Peter Kershaw, and Scott J. Lehman. Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. 
", "original_text": "Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n"}, "hash": "f405ac6e7a9e571de2baa511141a5a5581eefdf9bf52232000bf9f472055460e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9c397935-cbdb-4af5-ae96-623262270cf3", "node_type": "1", "metadata": {"window": "[5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. ", "original_text": "The sixth mass extinction: fact,\nfiction or speculation? "}, "hash": "d9a3f2b0ae245a5cda9ab8b6a14875ce5a49c7bfa2925214286c05078cb0d90c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. ", "mimetype": "text/plain", "start_char_idx": 57989, "end_char_idx": 58049, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9c397935-cbdb-4af5-ae96-623262270cf3": {"__data__": {"id_": "9c397935-cbdb-4af5-ae96-623262270cf3", "embedding": null, "metadata": {"window": "[5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. ", "original_text": "The sixth mass extinction: fact,\nfiction or speculation? "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1e602cba-2b97-4c95-adf8-23e950e42dd9", "node_type": "1", "metadata": {"window": "Humans rather than climate\nthe primary cause of pleistocene megafaunal extinction in australia.Nature Communica-\ntions, 8(1), January 2017.\n [5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. 
Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n", "original_text": "[6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. "}, "hash": "485cf4ccb3638e139ef026d620f5a5992a2dd14d974c3e6fc88afbed7cec72dc", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "25dfca0d-0b15-426d-9106-1fa2a9fa6e1a", "node_type": "1", "metadata": {"window": "Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n", "original_text": "Biological Reviews, 97(2):640\u2013663, January 2022.\n"}, "hash": "179f521753d66bb5f16b4e68339679f9bc1b63b9608ebe835fdc7317765fae85", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The sixth mass extinction: fact,\nfiction or speculation? ", "mimetype": "text/plain", "start_char_idx": 58049, "end_char_idx": 58106, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "25dfca0d-0b15-426d-9106-1fa2a9fa6e1a": {"__data__": {"id_": "25dfca0d-0b15-426d-9106-1fa2a9fa6e1a", "embedding": null, "metadata": {"window": "Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. 
Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n", "original_text": "Biological Reviews, 97(2):640\u2013663, January 2022.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9c397935-cbdb-4af5-ae96-623262270cf3", "node_type": "1", "metadata": {"window": "[5] Gerardo Ceballos, Paul R. Ehrlich, and Rodolfo Dirzo. Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. ", "original_text": "The sixth mass extinction: fact,\nfiction or speculation? "}, "hash": "d9a3f2b0ae245a5cda9ab8b6a14875ce5a49c7bfa2925214286c05078cb0d90c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3a1f4ce3-1c43-4e97-8d11-fa85315fd754", "node_type": "1", "metadata": {"window": "Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. ", "original_text": "[7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. 
"}, "hash": "399e0db4f12cee25346e3efd40d5712d2cc357ad2faf2c4e913d211c521ef7a7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Biological Reviews, 97(2):640\u2013663, January 2022.\n", "mimetype": "text/plain", "start_char_idx": 58106, "end_char_idx": 58155, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3a1f4ce3-1c43-4e97-8d11-fa85315fd754": {"__data__": {"id_": "3a1f4ce3-1c43-4e97-8d11-fa85315fd754", "embedding": null, "metadata": {"window": "Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. ", "original_text": "[7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "25dfca0d-0b15-426d-9106-1fa2a9fa6e1a", "node_type": "1", "metadata": {"window": "Biological annihilation via the\nongoing sixth mass extinction signaled by vertebrate population losses and declines. Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n", "original_text": "Biological Reviews, 97(2):640\u2013663, January 2022.\n"}, "hash": "179f521753d66bb5f16b4e68339679f9bc1b63b9608ebe835fdc7317765fae85", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "54ffc58a-716e-4ca2-a1fe-8c09ae37222b", "node_type": "1", "metadata": {"window": "[6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. 
The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. ", "original_text": "Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n"}, "hash": "fc3949213a87e3c3dd333e2eb49154b9d1eb0a8db0d7b1687c4b95d91130a2e8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. ", "mimetype": "text/plain", "start_char_idx": 58155, "end_char_idx": 58378, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "54ffc58a-716e-4ca2-a1fe-8c09ae37222b": {"__data__": {"id_": "54ffc58a-716e-4ca2-a1fe-8c09ae37222b", "embedding": null, "metadata": {"window": "[6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. ", "original_text": "Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3a1f4ce3-1c43-4e97-8d11-fa85315fd754", "node_type": "1", "metadata": {"window": "Pro-\nceedings of the National Academy of Sciences, 114(30):E6089\u2013E6096, 2017.\n [6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. 
Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. ", "original_text": "[7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. "}, "hash": "399e0db4f12cee25346e3efd40d5712d2cc357ad2faf2c4e913d211c521ef7a7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "868606b4-3614-4597-be8f-c25b3da8b6f0", "node_type": "1", "metadata": {"window": "The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n", "original_text": "[8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. "}, "hash": "06b4f6cb0b227613e3984febfc9d56572e87e7378d6a106d80923c00a0905700", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n", "mimetype": "text/plain", "start_char_idx": 58378, "end_char_idx": 58471, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "868606b4-3614-4597-be8f-c25b3da8b6f0": {"__data__": {"id_": "868606b4-3614-4597-be8f-c25b3da8b6f0", "embedding": null, "metadata": {"window": "The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. 
Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n", "original_text": "[8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "54ffc58a-716e-4ca2-a1fe-8c09ae37222b", "node_type": "1", "metadata": {"window": "[6] Robert H. Cowie, Philippe Bouchet, and Beno\u00eet Fontaine. The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. ", "original_text": "Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n"}, "hash": "fc3949213a87e3c3dd333e2eb49154b9d1eb0a8db0d7b1687c4b95d91130a2e8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4bda94d3-f684-49ae-8fa7-10ffb5ecfe8a", "node_type": "1", "metadata": {"window": "Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. ", "original_text": "Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n"}, "hash": "73fd2625b325a9c0152758ebd37361357f2b96edd90a8f25ff90d24f4c061613", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. 
", "mimetype": "text/plain", "start_char_idx": 58471, "end_char_idx": 58577, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4bda94d3-f684-49ae-8fa7-10ffb5ecfe8a": {"__data__": {"id_": "4bda94d3-f684-49ae-8fa7-10ffb5ecfe8a", "embedding": null, "metadata": {"window": "Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. ", "original_text": "Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "868606b4-3614-4597-be8f-c25b3da8b6f0", "node_type": "1", "metadata": {"window": "The sixth mass extinction: fact,\nfiction or speculation? Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n", "original_text": "[8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. "}, "hash": "06b4f6cb0b227613e3984febfc9d56572e87e7378d6a106d80923c00a0905700", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "84475cf5-b042-4415-9527-eb8121b9ea99", "node_type": "1", "metadata": {"window": "[7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. 
Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. ", "original_text": "[9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. "}, "hash": "404bc60db1ee62eda6692110fb1d11e6b8ba8c0ab60a1b16925c4245c058f44d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n", "mimetype": "text/plain", "start_char_idx": 58577, "end_char_idx": 58728, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "84475cf5-b042-4415-9527-eb8121b9ea99": {"__data__": {"id_": "84475cf5-b042-4415-9527-eb8121b9ea99", "embedding": null, "metadata": {"window": "[7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. ", "original_text": "[9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4bda94d3-f684-49ae-8fa7-10ffb5ecfe8a", "node_type": "1", "metadata": {"window": "Biological Reviews, 97(2):640\u2013663, January 2022.\n [7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. 
Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. ", "original_text": "Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n"}, "hash": "73fd2625b325a9c0152758ebd37361357f2b96edd90a8f25ff90d24f4c061613", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c6621a32-a894-4955-9a4d-755481e70816", "node_type": "1", "metadata": {"window": "Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n", "original_text": "Patterns,\ncauses, and consequences of anthropocene defaunation. "}, "hash": "935fcb797ba0b804cebd6473f39d444c4d30874ab5cea7ed78f9d2ea142daa82", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. ", "mimetype": "text/plain", "start_char_idx": 58728, "end_char_idx": 58805, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c6621a32-a894-4955-9a4d-755481e70816": {"__data__": {"id_": "c6621a32-a894-4955-9a4d-755481e70816", "embedding": null, "metadata": {"window": "Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n", "original_text": "Patterns,\ncauses, and consequences of anthropocene defaunation. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "84475cf5-b042-4415-9527-eb8121b9ea99", "node_type": "1", "metadata": {"window": "[7] Anthony D. Barnosky, Nicholas Matzke, Susumu Tomiya, Guinevere O. U. Wogan, Brian\nSwartz, Tiago B. Quental, Charles Marshall, Jenny L. McGuire, Emily L. Lindsey, Kaitlin C.\nMaguire, Ben Mersey, and Elizabeth A. Ferrer. 
Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. ", "original_text": "[9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. "}, "hash": "404bc60db1ee62eda6692110fb1d11e6b8ba8c0ab60a1b16925c4245c058f44d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "85784adb-42aa-4c09-8217-5f1fc3a0923b", "node_type": "1", "metadata": {"window": "[8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. ", "original_text": "Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n"}, "hash": "22ca8e3a4760165b34b5b8e8659da39a2e5dde54c0b20a887fe00669c6df5405", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Patterns,\ncauses, and consequences of anthropocene defaunation. ", "mimetype": "text/plain", "start_char_idx": 58805, "end_char_idx": 58869, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "85784adb-42aa-4c09-8217-5f1fc3a0923b": {"__data__": {"id_": "85784adb-42aa-4c09-8217-5f1fc3a0923b", "embedding": null, "metadata": {"window": "[8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. 
", "original_text": "Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c6621a32-a894-4955-9a4d-755481e70816", "node_type": "1", "metadata": {"window": "Has the earth\u2019s sixth mass extinction al-\nready arrived?Nature, 471(7336):51\u201357, March 2011.\n [8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n", "original_text": "Patterns,\ncauses, and consequences of anthropocene defaunation. "}, "hash": "935fcb797ba0b804cebd6473f39d444c4d30874ab5cea7ed78f9d2ea142daa82", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "93dbb6bf-40a7-4ffc-9e39-62061ae9274c", "node_type": "1", "metadata": {"window": "Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? ", "original_text": "[10] Arthur D. Chapman. "}, "hash": "5d1f0bafc469ec0ff8c0c993c6f45d190d8038246511c57835b0ea1f1206fd4e", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n", "mimetype": "text/plain", "start_char_idx": 58869, "end_char_idx": 58943, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "93dbb6bf-40a7-4ffc-9e39-62061ae9274c": {"__data__": {"id_": "93dbb6bf-40a7-4ffc-9e39-62061ae9274c", "embedding": null, "metadata": {"window": "Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? ", "original_text": "[10] Arthur D. 
Chapman. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "85784adb-42aa-4c09-8217-5f1fc3a0923b", "node_type": "1", "metadata": {"window": "[8] Aelys M. Humphreys, Rafa\u00ebl Govaerts, Sarah Z. Ficinski, Eimear Nic Lughadha, and\nMaria S. Vorontsova. Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. ", "original_text": "Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n"}, "hash": "22ca8e3a4760165b34b5b8e8659da39a2e5dde54c0b20a887fe00669c6df5405", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "42c5c024-e12f-4a21-9ce0-919a80fe387a", "node_type": "1", "metadata": {"window": "[9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n", "original_text": "Numbers of living species in australia and the world. "}, "hash": "e83930d1c0270fe8bfbc36b266f0054ed29609f354cca28e167226d1eea0b0fd", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[10] Arthur D. Chapman. ", "mimetype": "text/plain", "start_char_idx": 58943, "end_char_idx": 58967, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "42c5c024-e12f-4a21-9ce0-919a80fe387a": {"__data__": {"id_": "42c5c024-e12f-4a21-9ce0-919a80fe387a", "embedding": null, "metadata": {"window": "[9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n", "original_text": "Numbers of living species in australia and the world. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "93dbb6bf-40a7-4ffc-9e39-62061ae9274c", "node_type": "1", "metadata": {"window": "Global dataset shows geography and life form predict modern plant\nextinction and rediscovery.Nature Ecology & Evolution, 3(7):1043\u20131047, June 2019.\n [9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? ", "original_text": "[10] Arthur D. Chapman. "}, "hash": "5d1f0bafc469ec0ff8c0c993c6f45d190d8038246511c57835b0ea1f1206fd4e", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "24f57b00-4dfa-437a-8b73-2bfc6ae90afa", "node_type": "1", "metadata": {"window": "Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. ", "original_text": "Technical\nreport, Australian Biodiversity Information Services, 2009.\n"}, "hash": "88d9e3c2e586da2b0889cbff5ed39ad0ecfc24cec8915ad54da4e06b8224adde", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Numbers of living species in australia and the world. ", "mimetype": "text/plain", "start_char_idx": 58967, "end_char_idx": 59021, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "24f57b00-4dfa-437a-8b73-2bfc6ae90afa": {"__data__": {"id_": "24f57b00-4dfa-437a-8b73-2bfc6ae90afa", "embedding": null, "metadata": {"window": "Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. 
", "original_text": "Technical\nreport, Australian Biodiversity Information Services, 2009.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "42c5c024-e12f-4a21-9ce0-919a80fe387a", "node_type": "1", "metadata": {"window": "[9] Hillary S. Young, Douglas J. McCauley, Mauro Galetti, and Rodolfo Dirzo. Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n", "original_text": "Numbers of living species in australia and the world. "}, "hash": "e83930d1c0270fe8bfbc36b266f0054ed29609f354cca28e167226d1eea0b0fd", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "05271871-334c-43b9-99ee-23accab4be00", "node_type": "1", "metadata": {"window": "Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n", "original_text": "[11] Nigel E. Stork. "}, "hash": "d145c9dcd579669dce9d19bb2f6897f10cbb5c1a783501349cabd938c330201c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Technical\nreport, Australian Biodiversity Information Services, 2009.\n", "mimetype": "text/plain", "start_char_idx": 59021, "end_char_idx": 59091, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "05271871-334c-43b9-99ee-23accab4be00": {"__data__": {"id_": "05271871-334c-43b9-99ee-23accab4be00", "embedding": null, "metadata": {"window": "Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n", "original_text": "[11] Nigel E. Stork. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "24f57b00-4dfa-437a-8b73-2bfc6ae90afa", "node_type": "1", "metadata": {"window": "Patterns,\ncauses, and consequences of anthropocene defaunation. Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. ", "original_text": "Technical\nreport, Australian Biodiversity Information Services, 2009.\n"}, "hash": "88d9e3c2e586da2b0889cbff5ed39ad0ecfc24cec8915ad54da4e06b8224adde", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "845821ef-55a0-447a-983c-e931e9eadc39", "node_type": "1", "metadata": {"window": "[10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. ", "original_text": "How many species of insects and other terrestrial arthropods are there on\nearth? "}, "hash": "bf3f647c5cec3768f6e6506dbfd35999dc3b051ef49107816f61d2c2a3b3563f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[11] Nigel E. Stork. ", "mimetype": "text/plain", "start_char_idx": 59091, "end_char_idx": 59112, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "845821ef-55a0-447a-983c-e931e9eadc39": {"__data__": {"id_": "845821ef-55a0-447a-983c-e931e9eadc39", "embedding": null, "metadata": {"window": "[10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. 
Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. ", "original_text": "How many species of insects and other terrestrial arthropods are there on\nearth? "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "05271871-334c-43b9-99ee-23accab4be00", "node_type": "1", "metadata": {"window": "Annual Review of Ecology, Evo-\nlution, and Systematics, 47:333\u2013358, 2016.\n [10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n", "original_text": "[11] Nigel E. Stork. "}, "hash": "d145c9dcd579669dce9d19bb2f6897f10cbb5c1a783501349cabd938c330201c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "17f505de-eba1-4fc0-9d20-a5db5f1f3437", "node_type": "1", "metadata": {"window": "Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. 
Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n", "original_text": "Annual Review of Entomology, 63(1):31\u201345, January 2018.\n"}, "hash": "7c52cb2a49e76c9fc81c32c385d3278b5a40eb1ba0bb09b67f490c61a1dc5537", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "How many species of insects and other terrestrial arthropods are there on\nearth? ", "mimetype": "text/plain", "start_char_idx": 59112, "end_char_idx": 59193, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "17f505de-eba1-4fc0-9d20-a5db5f1f3437": {"__data__": {"id_": "17f505de-eba1-4fc0-9d20-a5db5f1f3437", "embedding": null, "metadata": {"window": "Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n", "original_text": "Annual Review of Entomology, 63(1):31\u201345, January 2018.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "845821ef-55a0-447a-983c-e931e9eadc39", "node_type": "1", "metadata": {"window": "[10] Arthur D. Chapman. Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. 
", "original_text": "How many species of insects and other terrestrial arthropods are there on\nearth? "}, "hash": "bf3f647c5cec3768f6e6506dbfd35999dc3b051ef49107816f61d2c2a3b3563f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f0af8857-c8d0-4783-9e30-b3dc17316850", "node_type": "1", "metadata": {"window": "Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. ", "original_text": "31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. "}, "hash": "fdef1c2dc45915a7a88bce43a1e3f144253933536f10e053fbde5d851ea60795", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Annual Review of Entomology, 63(1):31\u201345, January 2018.\n", "mimetype": "text/plain", "start_char_idx": 59193, "end_char_idx": 59249, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f0af8857-c8d0-4783-9e30-b3dc17316850": {"__data__": {"id_": "f0af8857-c8d0-4783-9e30-b3dc17316850", "embedding": null, "metadata": {"window": "Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. 
Swengel, Alessandro\nGentile, and Jonathan M. Chase. ", "original_text": "31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "17f505de-eba1-4fc0-9d20-a5db5f1f3437", "node_type": "1", "metadata": {"window": "Numbers of living species in australia and the world. Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n", "original_text": "Annual Review of Entomology, 63(1):31\u201345, January 2018.\n"}, "hash": "7c52cb2a49e76c9fc81c32c385d3278b5a40eb1ba0bb09b67f490c61a1dc5537", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8fc3e581-2e27-4273-9f62-b610581d35c8", "node_type": "1", "metadata": {"window": "[11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. 
", "original_text": "More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n"}, "hash": "682daa2c8d25b27087a17eec536ce1b190e44c331d1b1b8bd4ccea2ee7674e78", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. ", "mimetype": "text/plain", "start_char_idx": 59249, "end_char_idx": 59447, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8fc3e581-2e27-4273-9f62-b610581d35c8": {"__data__": {"id_": "8fc3e581-2e27-4273-9f62-b610581d35c8", "embedding": null, "metadata": {"window": "[11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. ", "original_text": "More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f0af8857-c8d0-4783-9e30-b3dc17316850", "node_type": "1", "metadata": {"window": "Technical\nreport, Australian Biodiversity Information Services, 2009.\n [11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. 
Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. ", "original_text": "31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. "}, "hash": "fdef1c2dc45915a7a88bce43a1e3f144253933536f10e053fbde5d851ea60795", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "bfd51746-8f4f-4e5a-9cb9-1891f4612a37", "node_type": "1", "metadata": {"window": "How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n", "original_text": "[13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. "}, "hash": "5781d43a478be4b62aff279bc151a2bc72893bd390a8eaccd265aa3ae249f6b9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n", "mimetype": "text/plain", "start_char_idx": 59447, "end_char_idx": 59581, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "bfd51746-8f4f-4e5a-9cb9-1891f4612a37": {"__data__": {"id_": "bfd51746-8f4f-4e5a-9cb9-1891f4612a37", "embedding": null, "metadata": {"window": "How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. 
More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n", "original_text": "[13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8fc3e581-2e27-4273-9f62-b610581d35c8", "node_type": "1", "metadata": {"window": "[11] Nigel E. Stork. How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. ", "original_text": "More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n"}, "hash": "682daa2c8d25b27087a17eec536ce1b190e44c331d1b1b8bd4ccea2ee7674e78", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "63a946f5-614e-4f0b-a3d9-4d7eba44d239", "node_type": "1", "metadata": {"window": "Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. 
Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. ", "original_text": "Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n"}, "hash": "f059ee0698ba6ce894ac3f869a1b18798ec085e61d8caf7b1845d4a4e1e3b9fc", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. ", "mimetype": "text/plain", "start_char_idx": 59581, "end_char_idx": 59912, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "63a946f5-614e-4f0b-a3d9-4d7eba44d239": {"__data__": {"id_": "63a946f5-614e-4f0b-a3d9-4d7eba44d239", "embedding": null, "metadata": {"window": "Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. 
Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. ", "original_text": "Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "bfd51746-8f4f-4e5a-9cb9-1891f4612a37", "node_type": "1", "metadata": {"window": "How many species of insects and other terrestrial arthropods are there on\nearth? Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n", "original_text": "[13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. "}, "hash": "5781d43a478be4b62aff279bc151a2bc72893bd390a8eaccd265aa3ae249f6b9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fb0eaa3c-59a6-4896-b0f1-b139b803fc82", "node_type": "1", "metadata": {"window": "31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. 
Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? ", "original_text": "[14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. "}, "hash": "f65dbe242d5dcba40c986639de0cbf5e8244ccea52c2d2ef8e9981d6d1cd5b56", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n", "mimetype": "text/plain", "start_char_idx": 59912, "end_char_idx": 60041, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fb0eaa3c-59a6-4896-b0f1-b139b803fc82": {"__data__": {"id_": "fb0eaa3c-59a6-4896-b0f1-b139b803fc82", "embedding": null, "metadata": {"window": "31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? ", "original_text": "[14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "63a946f5-614e-4f0b-a3d9-4d7eba44d239", "node_type": "1", "metadata": {"window": "Annual Review of Entomology, 63(1):31\u201345, January 2018.\n 31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. 
More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. ", "original_text": "Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n"}, "hash": "f059ee0698ba6ce894ac3f869a1b18798ec085e61d8caf7b1845d4a4e1e3b9fc", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8fd2084a-81e6-4942-a108-fd4194781dd3", "node_type": "1", "metadata": {"window": "More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n", "original_text": "Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. "}, "hash": "585c8ce186744f3b522408ef8d88432d392f52762559658e3f23238270f6f277", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. ", "mimetype": "text/plain", "start_char_idx": 60041, "end_char_idx": 60163, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8fd2084a-81e6-4942-a108-fd4194781dd3": {"__data__": {"id_": "8fd2084a-81e6-4942-a108-fd4194781dd3", "embedding": null, "metadata": {"window": "More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. 
Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n", "original_text": "Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fb0eaa3c-59a6-4896-b0f1-b139b803fc82", "node_type": "1", "metadata": {"window": "31\n\n[12] Caspar A. Hallmann, Martin Sorg, Eelke Jongejans, Henk Siepel, Nick Hofland, Heinz\nSchwan, Werner Stenmans, Andreas M\u00fcller, Hubert Sumser, Thomas H\u00f6rren, Dave Goul-\nson, and Hans de Kroon. More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? ", "original_text": "[14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. "}, "hash": "f65dbe242d5dcba40c986639de0cbf5e8244ccea52c2d2ef8e9981d6d1cd5b56", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "79e1410d-f31a-4a94-ad26-af8fe015f2f0", "node_type": "1", "metadata": {"window": "[13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. 
Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . ", "original_text": "Science, 368(6489):417\u2013420, April 2020.\n"}, "hash": "23031f366bef9406c66e93dc9f23567a6ce46c340ada0861c04be9d6a6c65471", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. ", "mimetype": "text/plain", "start_char_idx": 60163, "end_char_idx": 60256, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "79e1410d-f31a-4a94-ad26-af8fe015f2f0": {"__data__": {"id_": "79e1410d-f31a-4a94-ad26-af8fe015f2f0", "embedding": null, "metadata": {"window": "[13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . ", "original_text": "Science, 368(6489):417\u2013420, April 2020.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8fd2084a-81e6-4942-a108-fd4194781dd3", "node_type": "1", "metadata": {"window": "More than 75 percent decline over 27 years in total flying insect\nbiomass in protected areas.PLOS ONE, 12(10):e0185809, October 2017.\n [13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. 
Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n", "original_text": "Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. "}, "hash": "585c8ce186744f3b522408ef8d88432d392f52762559658e3f23238270f6f277", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b50a540f-c634-480e-aa83-fc30b1fd5f25", "node_type": "1", "metadata": {"window": "Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. ", "original_text": "[15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. "}, "hash": "ebdc63cbb9ecaa118770dfb455605eb5afbdbb5926befa30259479b783754d0a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Science, 368(6489):417\u2013420, April 2020.\n", "mimetype": "text/plain", "start_char_idx": 60256, "end_char_idx": 60296, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b50a540f-c634-480e-aa83-fc30b1fd5f25": {"__data__": {"id_": "b50a540f-c634-480e-aa83-fc30b1fd5f25", "embedding": null, "metadata": {"window": "Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. ", "original_text": "[15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "79e1410d-f31a-4a94-ad26-af8fe015f2f0", "node_type": "1", "metadata": {"window": "[13] Sebastian Seibold, Martin M. Gossner, Nadja K. Simons, Nico Bl\u00fcthgen, J\u00f6rg M\u00fcller, Di-\ndem Ambarl\u0131, Christian Ammer, J\u00fcrgen Bauhus, Markus Fischer, Jan C. Habel, Karl Ed-\nuard Linsenmair, Thomas Nauss, Caterina Penone, Daniel Prati, Peter Schall, Ernst-Detlef\nSchulze, Juliane Vogt, Stephan W\u00f6llauer, and Wolfgang W. Weisser. Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . ", "original_text": "Science, 368(6489):417\u2013420, April 2020.\n"}, "hash": "23031f366bef9406c66e93dc9f23567a6ce46c340ada0861c04be9d6a6c65471", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "4d8265b9-81d0-456b-a1f0-53e01195baea", "node_type": "1", "metadata": {"window": "[14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. ", "original_text": "Is the\ninsect apocalypse upon us? "}, "hash": "c75b539a0f7e2898571c14cd05edf4aabbe0b2f8bae68abf1d3318a7564f79ba", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. ", "mimetype": "text/plain", "start_char_idx": 60296, "end_char_idx": 60464, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "4d8265b9-81d0-456b-a1f0-53e01195baea": {"__data__": {"id_": "4d8265b9-81d0-456b-a1f0-53e01195baea", "embedding": null, "metadata": {"window": "[14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. 
Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. ", "original_text": "Is the\ninsect apocalypse upon us? "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b50a540f-c634-480e-aa83-fc30b1fd5f25", "node_type": "1", "metadata": {"window": "Arthropod decline in\ngrasslands and forests is associated with landscape-level drivers.Nature, 574(7780):671\u2013\n674, October 2019.\n [14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. ", "original_text": "[15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. "}, "hash": "ebdc63cbb9ecaa118770dfb455605eb5afbdbb5926befa30259479b783754d0a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "86d9973f-21b0-4ebf-b925-02dad85aa679", "node_type": "1", "metadata": {"window": "Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n", "original_text": "how to find out.Biological Conservation, 241:108327, 2020.\n"}, "hash": "0c016d539e7bbc5dcce3ae507a7e675ef54976abc888682db500714059a66442", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Is the\ninsect apocalypse upon us? ", "mimetype": "text/plain", "start_char_idx": 60464, "end_char_idx": 60498, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "86d9973f-21b0-4ebf-b925-02dad85aa679": {"__data__": {"id_": "86d9973f-21b0-4ebf-b925-02dad85aa679", "embedding": null, "metadata": {"window": "Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? 
how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n", "original_text": "how to find out.Biological Conservation, 241:108327, 2020.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "4d8265b9-81d0-456b-a1f0-53e01195baea", "node_type": "1", "metadata": {"window": "[14] Roel van Klink, Diana E. Bowler, Konstantin B. Gongalsky, Ann B. Swengel, Alessandro\nGentile, and Jonathan M. Chase. Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. ", "original_text": "Is the\ninsect apocalypse upon us? "}, "hash": "c75b539a0f7e2898571c14cd05edf4aabbe0b2f8bae68abf1d3318a7564f79ba", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "1440f2be-e94f-4f2c-a37b-67fd11eb46de", "node_type": "1", "metadata": {"window": "Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. ", "original_text": "[16] G. W. Hopkins and R. P . "}, "hash": "bedfeb4d6e129deac08ba5e0d8d1ca5dfe03f6496d990206a53eb274c3a5a3fe", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "how to find out.Biological Conservation, 241:108327, 2020.\n", "mimetype": "text/plain", "start_char_idx": 60498, "end_char_idx": 60557, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "1440f2be-e94f-4f2c-a37b-67fd11eb46de": {"__data__": {"id_": "1440f2be-e94f-4f2c-a37b-67fd11eb46de", "embedding": null, "metadata": {"window": "Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. ", "original_text": "[16] G. W. Hopkins and R. P . 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "86d9973f-21b0-4ebf-b925-02dad85aa679", "node_type": "1", "metadata": {"window": "Meta-analysis reveals declines in terrestrial but increases\nin freshwater insect abundances. Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n", "original_text": "how to find out.Biological Conservation, 241:108327, 2020.\n"}, "hash": "0c016d539e7bbc5dcce3ae507a7e675ef54976abc888682db500714059a66442", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8090ba14-7f38-4daf-b862-09e90de0e046", "node_type": "1", "metadata": {"window": "[15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. ", "original_text": "Freckleton. "}, "hash": "f5906f72d6361b358e02350aa2e53fef81460eb0702640cc68c4617539118980", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[16] G. W. Hopkins and R. P . ", "mimetype": "text/plain", "start_char_idx": 60557, "end_char_idx": 60587, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8090ba14-7f38-4daf-b862-09e90de0e046": {"__data__": {"id_": "8090ba14-7f38-4daf-b862-09e90de0e046", "embedding": null, "metadata": {"window": "[15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. ", "original_text": "Freckleton. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "1440f2be-e94f-4f2c-a37b-67fd11eb46de", "node_type": "1", "metadata": {"window": "Science, 368(6489):417\u2013420, April 2020.\n [15] Graham A. Montgomery, Robert R. Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. ", "original_text": "[16] G. W. Hopkins and R. P . "}, "hash": "bedfeb4d6e129deac08ba5e0d8d1ca5dfe03f6496d990206a53eb274c3a5a3fe", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c0d4cb8b-7723-4251-8957-2d8d906cbc46", "node_type": "1", "metadata": {"window": "Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n", "original_text": "Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. "}, "hash": "dc29ce5714fc9e58faa6032a4d55b9aa2d96080d492f241a0543fb972234dec2", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Freckleton. ", "mimetype": "text/plain", "start_char_idx": 60587, "end_char_idx": 60599, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c0d4cb8b-7723-4251-8957-2d8d906cbc46": {"__data__": {"id_": "c0d4cb8b-7723-4251-8957-2d8d906cbc46", "embedding": null, "metadata": {"window": "Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n", "original_text": "Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8090ba14-7f38-4daf-b862-09e90de0e046", "node_type": "1", "metadata": {"window": "[15] Graham A. Montgomery, Robert R. 
Dunn, Richard Fox, Eelke Jongejans, Simon R. Leather,\nManu E. Saunders, Chris R. Shortall, Morgan W. Tingley, and David L. Wagner. Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. ", "original_text": "Freckleton. "}, "hash": "f5906f72d6361b358e02350aa2e53fef81460eb0702640cc68c4617539118980", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6d802a6d-3f8e-4591-8fda-adc517463116", "node_type": "1", "metadata": {"window": "how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. ", "original_text": "Animal Conservation, 5(3):245\u2013249, August\n2002.\n"}, "hash": "65b368c8396fc4f1b28f50aae7b74b129cd62964b86a490cca9dcdd00a267b9d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. ", "mimetype": "text/plain", "start_char_idx": 60599, "end_char_idx": 60695, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6d802a6d-3f8e-4591-8fda-adc517463116": {"__data__": {"id_": "6d802a6d-3f8e-4591-8fda-adc517463116", "embedding": null, "metadata": {"window": "how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. ", "original_text": "Animal Conservation, 5(3):245\u2013249, August\n2002.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c0d4cb8b-7723-4251-8957-2d8d906cbc46", "node_type": "1", "metadata": {"window": "Is the\ninsect apocalypse upon us? how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. 
A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n", "original_text": "Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. "}, "hash": "dc29ce5714fc9e58faa6032a4d55b9aa2d96080d492f241a0543fb972234dec2", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6b9e9e20-907c-4eea-a6f7-db8536fafdb7", "node_type": "1", "metadata": {"window": "[16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. ", "original_text": "[17] Erica Fleishman and Dennis D. Murphy. "}, "hash": "53d4c087eecdaa04b3be1c3fa306206581c5dd9b13848b2e17827b3380f42ee6", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Animal Conservation, 5(3):245\u2013249, August\n2002.\n", "mimetype": "text/plain", "start_char_idx": 60695, "end_char_idx": 60743, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6b9e9e20-907c-4eea-a6f7-db8536fafdb7": {"__data__": {"id_": "6b9e9e20-907c-4eea-a6f7-db8536fafdb7", "embedding": null, "metadata": {"window": "[16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. ", "original_text": "[17] Erica Fleishman and Dennis D. Murphy. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6d802a6d-3f8e-4591-8fda-adc517463116", "node_type": "1", "metadata": {"window": "how to find out.Biological Conservation, 241:108327, 2020.\n [16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. 
", "original_text": "Animal Conservation, 5(3):245\u2013249, August\n2002.\n"}, "hash": "65b368c8396fc4f1b28f50aae7b74b129cd62964b86a490cca9dcdd00a267b9d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "892c0e72-d7d4-43a7-b673-7ca4b14dbe2a", "node_type": "1", "metadata": {"window": "Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n", "original_text": "A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. "}, "hash": "3d8621257b1162d1464341adf723228f498c866a6d9d2ee4e65ba133c3e528ee", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[17] Erica Fleishman and Dennis D. Murphy. ", "mimetype": "text/plain", "start_char_idx": 60743, "end_char_idx": 60786, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "892c0e72-d7d4-43a7-b673-7ca4b14dbe2a": {"__data__": {"id_": "892c0e72-d7d4-43a7-b673-7ca4b14dbe2a", "embedding": null, "metadata": {"window": "Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n", "original_text": "A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6b9e9e20-907c-4eea-a6f7-db8536fafdb7", "node_type": "1", "metadata": {"window": "[16] G. W. Hopkins and R. P . Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. ", "original_text": "[17] Erica Fleishman and Dennis D. Murphy. 
"}, "hash": "53d4c087eecdaa04b3be1c3fa306206581c5dd9b13848b2e17827b3380f42ee6", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a4b48e49-421d-4f99-a9bb-a6b1513b5a86", "node_type": "1", "metadata": {"window": "Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. ", "original_text": "Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n"}, "hash": "9324c653416f86fcf8352649db44dd94b859d47f8724615b1957fe6315bcbcad", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. ", "mimetype": "text/plain", "start_char_idx": 60786, "end_char_idx": 60891, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a4b48e49-421d-4f99-a9bb-a6b1513b5a86": {"__data__": {"id_": "a4b48e49-421d-4f99-a9bb-a6b1513b5a86", "embedding": null, "metadata": {"window": "Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. ", "original_text": "Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "892c0e72-d7d4-43a7-b673-7ca4b14dbe2a", "node_type": "1", "metadata": {"window": "Freckleton. Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. 
Sensors, 21(2), 2021.\n", "original_text": "A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. "}, "hash": "3d8621257b1162d1464341adf723228f498c866a6d9d2ee4e65ba133c3e528ee", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6bd47f03-3543-4d87-9eca-4c146c8476d5", "node_type": "1", "metadata": {"window": "Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. ", "original_text": "[18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. "}, "hash": "797fecd90a349326e891a77c165d58513bf41262b1a111e9163067166281d8a5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n", "mimetype": "text/plain", "start_char_idx": 60891, "end_char_idx": 60944, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6bd47f03-3543-4d87-9eca-4c146c8476d5": {"__data__": {"id_": "6bd47f03-3543-4d87-9eca-4c146c8476d5", "embedding": null, "metadata": {"window": "Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. ", "original_text": "[18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a4b48e49-421d-4f99-a9bb-a6b1513b5a86", "node_type": "1", "metadata": {"window": "Declines in the numbers of amateur and professional\ntaxonomists: implications for conservation. Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. 
An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. ", "original_text": "Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n"}, "hash": "9324c653416f86fcf8352649db44dd94b859d47f8724615b1957fe6315bcbcad", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0c6d92c2-9c60-4670-805c-5ab17c5de6b7", "node_type": "1", "metadata": {"window": "[17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n", "original_text": "An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. "}, "hash": "a491321023c025a1eac57f3a431f8de87ff4006c14c65f589229c7c6c09073b8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. ", "mimetype": "text/plain", "start_char_idx": 60944, "end_char_idx": 61055, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0c6d92c2-9c60-4670-805c-5ab17c5de6b7": {"__data__": {"id_": "0c6d92c2-9c60-4670-805c-5ab17c5de6b7", "embedding": null, "metadata": {"window": "[17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n", "original_text": "An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6bd47f03-3543-4d87-9eca-4c146c8476d5", "node_type": "1", "metadata": {"window": "Animal Conservation, 5(3):245\u2013249, August\n2002.\n [17] Erica Fleishman and Dennis D. Murphy. A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. 
Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. ", "original_text": "[18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. "}, "hash": "797fecd90a349326e891a77c165d58513bf41262b1a111e9163067166281d8a5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "ec17855c-fbc3-4722-bdb6-df7f56be2c99", "node_type": "1", "metadata": {"window": "A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. ", "original_text": "Sensors, 21(2), 2021.\n"}, "hash": "767ed6106b896a70fd9b743fcd3589fb9a3f4a8a1457f9df45563f974309e98c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. ", "mimetype": "text/plain", "start_char_idx": 61055, "end_char_idx": 61166, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "ec17855c-fbc3-4722-bdb6-df7f56be2c99": {"__data__": {"id_": "ec17855c-fbc3-4722-bdb6-df7f56be2c99", "embedding": null, "metadata": {"window": "A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. ", "original_text": "Sensors, 21(2), 2021.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0c6d92c2-9c60-4670-805c-5ab17c5de6b7", "node_type": "1", "metadata": {"window": "[17] Erica Fleishman and Dennis D. Murphy. 
A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n", "original_text": "An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. "}, "hash": "a491321023c025a1eac57f3a431f8de87ff4006c14c65f589229c7c6c09073b8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "054268bf-050f-4f8f-8d16-d655fa7ebae5", "node_type": "1", "metadata": {"window": "Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. ", "original_text": "[19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. "}, "hash": "bbe8cc161c5cd9824f960c143a38c0bc7020c40947d6c25f334c69bc5dc5c254", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Sensors, 21(2), 2021.\n", "mimetype": "text/plain", "start_char_idx": 61166, "end_char_idx": 61188, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "054268bf-050f-4f8f-8d16-d655fa7ebae5": {"__data__": {"id_": "054268bf-050f-4f8f-8d16-d655fa7ebae5", "embedding": null, "metadata": {"window": "Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. ", "original_text": "[19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "ec17855c-fbc3-4722-bdb6-df7f56be2c99", "node_type": "1", "metadata": {"window": "A realistic assessment of the indicator potential of\nbutterflies and other charismatic taxonomic groups. Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. ", "original_text": "Sensors, 21(2), 2021.\n"}, "hash": "767ed6106b896a70fd9b743fcd3589fb9a3f4a8a1457f9df45563f974309e98c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9d4996d3-8df9-413c-b554-a4c3b8670281", "node_type": "1", "metadata": {"window": "[18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n", "original_text": "Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. "}, "hash": "6476095c4e6adc65862479f2f4fa51359bf5fcfdef518e9bf22e7e840d871d7c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. ", "mimetype": "text/plain", "start_char_idx": 61188, "end_char_idx": 61252, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9d4996d3-8df9-413c-b554-a4c3b8670281": {"__data__": {"id_": "9d4996d3-8df9-413c-b554-a4c3b8670281", "embedding": null, "metadata": {"window": "[18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. 
Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n", "original_text": "Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "054268bf-050f-4f8f-8d16-d655fa7ebae5", "node_type": "1", "metadata": {"window": "Conservation Biology, 23(5):1109\u20131116,\nOctober 2009.\n [18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. ", "original_text": "[19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. "}, "hash": "bbe8cc161c5cd9824f960c143a38c0bc7020c40947d6c25f334c69bc5dc5c254", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "64e63e70-92ef-4fd8-93c0-1a3442e48122", "node_type": "1", "metadata": {"window": "An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. ", "original_text": "Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n"}, "hash": "9910eb717d598c4584b1757ce9ff9ca8f0e494ed944ad89b61aa49f4ea3631e3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. ", "mimetype": "text/plain", "start_char_idx": 61252, "end_char_idx": 61330, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "64e63e70-92ef-4fd8-93c0-1a3442e48122": {"__data__": {"id_": "64e63e70-92ef-4fd8-93c0-1a3442e48122", "embedding": null, "metadata": {"window": "An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. 
Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. ", "original_text": "Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9d4996d3-8df9-413c-b554-a4c3b8670281", "node_type": "1", "metadata": {"window": "[18] Kim Bjerge, Jakob Bonde Nielsen, Martin Videb\u00e6k Sepstrup, Flemming Helsing-Nielsen,\nand Toke Thomas H\u00f8ye. An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n", "original_text": "Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. "}, "hash": "6476095c4e6adc65862479f2f4fa51359bf5fcfdef518e9bf22e7e840d871d7c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "8bbae6c9-cec0-4940-96d2-de9dfeea7913", "node_type": "1", "metadata": {"window": "Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n", "original_text": "[20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. "}, "hash": "e7d0e6262170d9307a39df6da852e31f7f8db66dcdd46803f3baa8b0d92bbe22", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n", "mimetype": "text/plain", "start_char_idx": 61330, "end_char_idx": 61388, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "8bbae6c9-cec0-4940-96d2-de9dfeea7913": {"__data__": {"id_": "8bbae6c9-cec0-4940-96d2-de9dfeea7913", "embedding": null, "metadata": {"window": "Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 
Deep residual learning for\nimage recognition, 2015.\n", "original_text": "[20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "64e63e70-92ef-4fd8-93c0-1a3442e48122", "node_type": "1", "metadata": {"window": "An automated light trap to monitor moths (lepidoptera) using\ncomputer vision-based tracking and deep learning. Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. ", "original_text": "Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n"}, "hash": "9910eb717d598c4584b1757ce9ff9ca8f0e494ed944ad89b61aa49f4ea3631e3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "c574c077-3358-4a46-afeb-89a7034053a8", "node_type": "1", "metadata": {"window": "[19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. ", "original_text": "Forest in-\nsects and climate change: long-term trends in herbivore damage. "}, "hash": "61bf1742c937b71d6b9e3c9c814d2163e0160a09efc6a7a5a16b7f1201a37419", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. ", "mimetype": "text/plain", "start_char_idx": 61388, "end_char_idx": 61464, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "c574c077-3358-4a46-afeb-89a7034053a8": {"__data__": {"id_": "c574c077-3358-4a46-afeb-89a7034053a8", "embedding": null, "metadata": {"window": "[19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 
", "original_text": "Forest in-\nsects and climate change: long-term trends in herbivore damage. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "8bbae6c9-cec0-4940-96d2-de9dfeea7913", "node_type": "1", "metadata": {"window": "Sensors, 21(2), 2021.\n [19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n", "original_text": "[20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. "}, "hash": "e7d0e6262170d9307a39df6da852e31f7f8db66dcdd46803f3baa8b0d92bbe22", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9a3ff54e-1669-4043-8d76-a7171553b662", "node_type": "1", "metadata": {"window": "Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n", "original_text": "Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n"}, "hash": "e9be3891daa20dcdf4b2d5a38fec688b4c3d390085cfeda6f8dd3b0235dd6e0c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Forest in-\nsects and climate change: long-term trends in herbivore damage. ", "mimetype": "text/plain", "start_char_idx": 61464, "end_char_idx": 61539, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9a3ff54e-1669-4043-8d76-a7171553b662": {"__data__": {"id_": "9a3ff54e-1669-4043-8d76-a7171553b662", "embedding": null, "metadata": {"window": "Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 
Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n", "original_text": "Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "c574c077-3358-4a46-afeb-89a7034053a8", "node_type": "1", "metadata": {"window": "[19] Michael J. Furlong, Denis J. Wright, and Lloyd M. Dosdall. Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. ", "original_text": "Forest in-\nsects and climate change: long-term trends in herbivore damage. "}, "hash": "61bf1742c937b71d6b9e3c9c814d2163e0160a09efc6a7a5a16b7f1201a37419", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2d101f06-5fdb-412d-bb11-284b477b526d", "node_type": "1", "metadata": {"window": "Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. ", "original_text": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. "}, "hash": "2312b2d4952322255590e590a9e78ce50652951ddcb9b2ed270c1553069b86fa", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n", "mimetype": "text/plain", "start_char_idx": 61539, "end_char_idx": 61595, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2d101f06-5fdb-412d-bb11-284b477b526d": {"__data__": {"id_": "2d101f06-5fdb-412d-bb11-284b477b526d", "embedding": null, "metadata": {"window": "Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 
Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. ", "original_text": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9a3ff54e-1669-4043-8d76-a7171553b662", "node_type": "1", "metadata": {"window": "Diamondback moth ecol-\nogy and management: Problems, progress, and prospects. Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n", "original_text": "Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n"}, "hash": "e9be3891daa20dcdf4b2d5a38fec688b4c3d390085cfeda6f8dd3b0235dd6e0c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "f5eede01-7991-4960-b26e-abb4b6112057", "node_type": "1", "metadata": {"window": "[20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n", "original_text": "Deep residual learning for\nimage recognition, 2015.\n"}, "hash": "87fb88e87622337ec179f96d18770d57dc4a90e43c45cd51c94c1e515ec373a7", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. ", "mimetype": "text/plain", "start_char_idx": 61595, "end_char_idx": 61655, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "f5eede01-7991-4960-b26e-abb4b6112057": {"__data__": {"id_": "f5eede01-7991-4960-b26e-abb4b6112057", "embedding": null, "metadata": {"window": "[20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 
Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n", "original_text": "Deep residual learning for\nimage recognition, 2015.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2d101f06-5fdb-412d-bb11-284b477b526d", "node_type": "1", "metadata": {"window": "Annual Review of Entomology,\n58(1):517\u2013541, January 2013.\n [20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. ", "original_text": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. "}, "hash": "2312b2d4952322255590e590a9e78ce50652951ddcb9b2ed270c1553069b86fa", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d2128023-5b6f-4293-87d9-f9b6ce300d82", "node_type": "1", "metadata": {"window": "Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. ", "original_text": "[22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. "}, "hash": "0364bab05e822fdff3ffd5703dc3a4a88b99c6e9951050afeff91b63a3f39297", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Deep residual learning for\nimage recognition, 2015.\n", "mimetype": "text/plain", "start_char_idx": 61655, "end_char_idx": 61707, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d2128023-5b6f-4293-87d9-f9b6ce300d82": {"__data__": {"id_": "d2128023-5b6f-4293-87d9-f9b6ce300d82", "embedding": null, "metadata": {"window": "Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 
Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. ", "original_text": "[22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "f5eede01-7991-4960-b26e-abb4b6112057", "node_type": "1", "metadata": {"window": "[20] Maartje J. Klapwijk, Gy\u00f6rgy Cs\u00f3ka, Anik\u00f3 Hirka, and Christer Bj\u00f6rkman. Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n", "original_text": "Deep residual learning for\nimage recognition, 2015.\n"}, "hash": "87fb88e87622337ec179f96d18770d57dc4a90e43c45cd51c94c1e515ec373a7", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "6c01809d-808d-4575-b9e4-5df076d1b87f", "node_type": "1", "metadata": {"window": "Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n", "original_text": "Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n"}, "hash": "6c4533e86bd95142964ce1e58e59e5e1cacf0a14dffc22d241cfed91e3396526", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. ", "mimetype": "text/plain", "start_char_idx": 61707, "end_char_idx": 61767, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "6c01809d-808d-4575-b9e4-5df076d1b87f": {"__data__": {"id_": "6c01809d-808d-4575-b9e4-5df076d1b87f", "embedding": null, "metadata": {"window": "Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 
Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n", "original_text": "Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d2128023-5b6f-4293-87d9-f9b6ce300d82", "node_type": "1", "metadata": {"window": "Forest in-\nsects and climate change: long-term trends in herbivore damage. Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. ", "original_text": "[22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. "}, "hash": "0364bab05e822fdff3ffd5703dc3a4a88b99c6e9951050afeff91b63a3f39297", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "77b8d862-2e2b-416f-a1ef-61c02e3981ba", "node_type": "1", "metadata": {"window": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. ", "original_text": "[23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. 
"}, "hash": "1c0d8542a48dd259181d0dad4f2922302f8e1c10f670ca180acab7ee137f1625", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n", "mimetype": "text/plain", "start_char_idx": 61767, "end_char_idx": 61853, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "77b8d862-2e2b-416f-a1ef-61c02e3981ba": {"__data__": {"id_": "77b8d862-2e2b-416f-a1ef-61c02e3981ba", "embedding": null, "metadata": {"window": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. ", "original_text": "[23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "6c01809d-808d-4575-b9e4-5df076d1b87f", "node_type": "1", "metadata": {"window": "Ecology and Evolution,\n3(12):4183\u20134196, September 2013.\n [21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n", "original_text": "Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n"}, "hash": "6c4533e86bd95142964ce1e58e59e5e1cacf0a14dffc22d241cfed91e3396526", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "55bb919e-075f-47f2-930b-44b9f987b2e6", "node_type": "1", "metadata": {"window": "Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. 
Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. ", "original_text": "Feature pyramid networks for object detection, 2017.\n"}, "hash": "995e5396921afc9f0608bb8bf317194ac4856dbf7e4fd9a8d19e165a67895304", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. ", "mimetype": "text/plain", "start_char_idx": 61853, "end_char_idx": 61954, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "55bb919e-075f-47f2-930b-44b9f987b2e6": {"__data__": {"id_": "55bb919e-075f-47f2-930b-44b9f987b2e6", "embedding": null, "metadata": {"window": "Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. ", "original_text": "Feature pyramid networks for object detection, 2017.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "77b8d862-2e2b-416f-a1ef-61c02e3981ba", "node_type": "1", "metadata": {"window": "[21] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. ", "original_text": "[23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. "}, "hash": "1c0d8542a48dd259181d0dad4f2922302f8e1c10f670ca180acab7ee137f1625", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5c7717d8-3424-44f0-8f14-a1b33d61052f", "node_type": "1", "metadata": {"window": "[22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 
Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. ", "original_text": "[24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. "}, "hash": "c44317e46d1bcfa1d998181e4abb4d3b0b1474216e4fde3aa95fe076c961830c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Feature pyramid networks for object detection, 2017.\n", "mimetype": "text/plain", "start_char_idx": 61954, "end_char_idx": 62007, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5c7717d8-3424-44f0-8f14-a1b33d61052f": {"__data__": {"id_": "5c7717d8-3424-44f0-8f14-a1b33d61052f", "embedding": null, "metadata": {"window": "[22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. ", "original_text": "[24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "55bb919e-075f-47f2-930b-44b9f987b2e6", "node_type": "1", "metadata": {"window": "Deep residual learning for\nimage recognition, 2015.\n [22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. 
Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. ", "original_text": "Feature pyramid networks for object detection, 2017.\n"}, "hash": "995e5396921afc9f0608bb8bf317194ac4856dbf7e4fd9a8d19e165a67895304", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "da6b6896-c646-42c6-ac16-224663617bde", "node_type": "1", "metadata": {"window": "Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n", "original_text": "Segment anything, 2023.\n"}, "hash": "6476e0ea9ddb4405e93739944500f50f25e7ad46a9a8797f0dc495eddb65710a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. ", "mimetype": "text/plain", "start_char_idx": 62007, "end_char_idx": 62196, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "da6b6896-c646-42c6-ac16-224663617bde": {"__data__": {"id_": "da6b6896-c646-42c6-ac16-224663617bde", "embedding": null, "metadata": {"window": "Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n", "original_text": "Segment anything, 2023.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5c7717d8-3424-44f0-8f14-a1b33d61052f", "node_type": "1", "metadata": {"window": "[22] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. 
Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. ", "original_text": "[24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. "}, "hash": "c44317e46d1bcfa1d998181e4abb4d3b0b1474216e4fde3aa95fe076c961830c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "30afc2d8-2758-449e-88d5-79a729e8b528", "node_type": "1", "metadata": {"window": "[23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. ", "original_text": "32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. "}, "hash": "943754cd45c2408a7fd99e06aed18f67b7e9dc8661ea577e600ae1ddea56f002", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Segment anything, 2023.\n", "mimetype": "text/plain", "start_char_idx": 62196, "end_char_idx": 62220, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "30afc2d8-2758-449e-88d5-79a729e8b528": {"__data__": {"id_": "30afc2d8-2758-449e-88d5-79a729e8b528", "embedding": null, "metadata": {"window": "[23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. ", "original_text": "32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "da6b6896-c646-42c6-ac16-224663617bde", "node_type": "1", "metadata": {"window": "Faster r-cnn: Towards real-time\nobject detection with region proposal networks, 2015.\n [23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n", "original_text": "Segment anything, 2023.\n"}, "hash": "6476e0ea9ddb4405e93739944500f50f25e7ad46a9a8797f0dc495eddb65710a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "fbd270ed-190a-487e-9aa5-e16107ea5572", "node_type": "1", "metadata": {"window": "Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n", "original_text": "SSD: Single shot MultiBox detector. "}, "hash": "6951122c3507756188d27354b29e2e0eaaeabec5fac3c891dc64a186d2cb82f1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. ", "mimetype": "text/plain", "start_char_idx": 62220, "end_char_idx": 62341, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "fbd270ed-190a-487e-9aa5-e16107ea5572": {"__data__": {"id_": "fbd270ed-190a-487e-9aa5-e16107ea5572", "embedding": null, "metadata": {"window": "Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. 
Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n", "original_text": "SSD: Single shot MultiBox detector. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "30afc2d8-2758-449e-88d5-79a729e8b528", "node_type": "1", "metadata": {"window": "[23] Tsung-Yi Lin, Piotr Doll\u00e1r, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Be-\nlongie. Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. ", "original_text": "32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. "}, "hash": "943754cd45c2408a7fd99e06aed18f67b7e9dc8661ea577e600ae1ddea56f002", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "66f42a79-e531-48d2-839a-be0e2e64c06d", "node_type": "1", "metadata": {"window": "[24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . ", "original_text": "InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. "}, "hash": "78c3f984544a414c9a734a80aef695bf1338e71747d944efdd549b3cdc0a0cbe", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "SSD: Single shot MultiBox detector. ", "mimetype": "text/plain", "start_char_idx": 62341, "end_char_idx": 62377, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "66f42a79-e531-48d2-839a-be0e2e64c06d": {"__data__": {"id_": "66f42a79-e531-48d2-839a-be0e2e64c06d", "embedding": null, "metadata": {"window": "[24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. 
Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . ", "original_text": "InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "fbd270ed-190a-487e-9aa5-e16107ea5572", "node_type": "1", "metadata": {"window": "Feature pyramid networks for object detection, 2017.\n [24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n", "original_text": "SSD: Single shot MultiBox detector. "}, "hash": "6951122c3507756188d27354b29e2e0eaaeabec5fac3c891dc64a186d2cb82f1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "a12b77d7-1761-4301-a1e0-6e8426f7426f", "node_type": "1", "metadata": {"window": "Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. ", "original_text": "Springer International Publishing, 2016.\n"}, "hash": "94d8b26623210b3a7d05a9ebd0223a08971fa2026a2454c60c90fd37f5de1341", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. ", "mimetype": "text/plain", "start_char_idx": 62377, "end_char_idx": 62421, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "a12b77d7-1761-4301-a1e0-6e8426f7426f": {"__data__": {"id_": "a12b77d7-1761-4301-a1e0-6e8426f7426f", "embedding": null, "metadata": {"window": "Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. 
Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. ", "original_text": "Springer International Publishing, 2016.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "66f42a79-e531-48d2-839a-be0e2e64c06d", "node_type": "1", "metadata": {"window": "[24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura\nGustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r,\nand Ross Girshick. Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . ", "original_text": "InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. "}, "hash": "78c3f984544a414c9a734a80aef695bf1338e71747d944efdd549b3cdc0a0cbe", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d49eb40a-2874-42a9-b371-c80448c30d6e", "node_type": "1", "metadata": {"window": "32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n", "original_text": "[26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. "}, "hash": "946b04885e469677b0e2d59bfa334d49b30902f20419ebb63fe3daf5683507f8", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Springer International Publishing, 2016.\n", "mimetype": "text/plain", "start_char_idx": 62421, "end_char_idx": 62462, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d49eb40a-2874-42a9-b371-c80448c30d6e": {"__data__": {"id_": "d49eb40a-2874-42a9-b371-c80448c30d6e", "embedding": null, "metadata": {"window": "32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. 
Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n", "original_text": "[26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "a12b77d7-1761-4301-a1e0-6e8426f7426f", "node_type": "1", "metadata": {"window": "Segment anything, 2023.\n 32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. ", "original_text": "Springer International Publishing, 2016.\n"}, "hash": "94d8b26623210b3a7d05a9ebd0223a08971fa2026a2454c60c90fd37f5de1341", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9d9f5dee-f7cc-411d-99cc-c8342fd0e285", "node_type": "1", "metadata": {"window": "SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. ", "original_text": "Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n"}, "hash": "2f785f3a5ac067f437536636a6c82f22cc48f8c3b87b0a136f1fb8b1b4bdae8f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. ", "mimetype": "text/plain", "start_char_idx": 62462, "end_char_idx": 62550, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9d9f5dee-f7cc-411d-99cc-c8342fd0e285": {"__data__": {"id_": "9d9f5dee-f7cc-411d-99cc-c8342fd0e285", "embedding": null, "metadata": {"window": "SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. 
", "original_text": "Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d49eb40a-2874-42a9-b371-c80448c30d6e", "node_type": "1", "metadata": {"window": "32\n\n[25] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang\nFu, and Alexander C. Berg. SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n", "original_text": "[26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. "}, "hash": "946b04885e469677b0e2d59bfa334d49b30902f20419ebb63fe3daf5683507f8", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "830126df-bbee-4e7f-931d-23d2e7f81a64", "node_type": "1", "metadata": {"window": "InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n", "original_text": "[27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . "}, "hash": "83dde5e1b615d2d05b15a3c4816828c36a704d09022473b0fd19636e60112b2a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n", "mimetype": "text/plain", "start_char_idx": 62550, "end_char_idx": 62612, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "830126df-bbee-4e7f-931d-23d2e7f81a64": {"__data__": {"id_": "830126df-bbee-4e7f-931d-23d2e7f81a64", "embedding": null, "metadata": {"window": "InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. 
Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n", "original_text": "[27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9d9f5dee-f7cc-411d-99cc-c8342fd0e285", "node_type": "1", "metadata": {"window": "SSD: Single shot MultiBox detector. InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. ", "original_text": "Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n"}, "hash": "2f785f3a5ac067f437536636a6c82f22cc48f8c3b87b0a136f1fb8b1b4bdae8f", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "66e1ea73-e973-43e7-8cc6-b1a147c72a15", "node_type": "1", "metadata": {"window": "Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. ", "original_text": "Le, and Hartwig\nAdam. "}, "hash": "226af9ab9e1906a17b93688d9345d7b3a0bab903ede7ad12cbd68309440045b9", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . ", "mimetype": "text/plain", "start_char_idx": 62612, "end_char_idx": 62762, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "66e1ea73-e973-43e7-8cc6-b1a147c72a15": {"__data__": {"id_": "66e1ea73-e973-43e7-8cc6-b1a147c72a15", "embedding": null, "metadata": {"window": "Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. 
Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. ", "original_text": "Le, and Hartwig\nAdam. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "830126df-bbee-4e7f-931d-23d2e7f81a64", "node_type": "1", "metadata": {"window": "InComputer Vision \u2013 ECCV\n2016, pages 21\u201337. Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n", "original_text": "[27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . "}, "hash": "83dde5e1b615d2d05b15a3c4816828c36a704d09022473b0fd19636e60112b2a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "66d3c1cc-acaf-4bbc-b4f2-c7817a03d7da", "node_type": "1", "metadata": {"window": "[26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n", "original_text": "Searching for mobilenetv3, 2019.\n"}, "hash": "a63fd536b301ab44338017fe380c44325c75e812aaba55036fb2b077a3629390", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Le, and Hartwig\nAdam. ", "mimetype": "text/plain", "start_char_idx": 62762, "end_char_idx": 62784, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "66d3c1cc-acaf-4bbc-b4f2-c7817a03d7da": {"__data__": {"id_": "66d3c1cc-acaf-4bbc-b4f2-c7817a03d7da", "embedding": null, "metadata": {"window": "[26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. 
Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n", "original_text": "Searching for mobilenetv3, 2019.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "66e1ea73-e973-43e7-8cc6-b1a147c72a15", "node_type": "1", "metadata": {"window": "Springer International Publishing, 2016.\n [26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. ", "original_text": "Le, and Hartwig\nAdam. "}, "hash": "226af9ab9e1906a17b93688d9345d7b3a0bab903ede7ad12cbd68309440045b9", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "3111c0d1-1abb-4b24-b802-42bec515d202", "node_type": "1", "metadata": {"window": "Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. ", "original_text": "[28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. "}, "hash": "50d46c5a5db608e250005682639a5c99a561ff001bde1c0162f02a5952af1ed3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Searching for mobilenetv3, 2019.\n", "mimetype": "text/plain", "start_char_idx": 62784, "end_char_idx": 62817, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "3111c0d1-1abb-4b24-b802-42bec515d202": {"__data__": {"id_": "3111c0d1-1abb-4b24-b802-42bec515d202", "embedding": null, "metadata": {"window": "Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. ", "original_text": "[28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "66d3c1cc-acaf-4bbc-b4f2-c7817a03d7da", "node_type": "1", "metadata": {"window": "[26] Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh\nChen. Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n", "original_text": "Searching for mobilenetv3, 2019.\n"}, "hash": "a63fd536b301ab44338017fe380c44325c75e812aaba55036fb2b077a3629390", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "b29643f4-3f54-4505-aa74-e439bfb13dff", "node_type": "1", "metadata": {"window": "[27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. ", "original_text": "Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n"}, "hash": "29b8502144c168bd1c7eb86c4fb08b0991bd8c30d5efa7b2492b70037277987c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. ", "mimetype": "text/plain", "start_char_idx": 62817, "end_char_idx": 62889, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "b29643f4-3f54-4505-aa74-e439bfb13dff": {"__data__": {"id_": "b29643f4-3f54-4505-aa74-e439bfb13dff", "embedding": null, "metadata": {"window": "[27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. 
", "original_text": "Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "3111c0d1-1abb-4b24-b802-42bec515d202", "node_type": "1", "metadata": {"window": "Mobilenetv2: Inverted residuals and linear bottlenecks, 2019.\n [27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. ", "original_text": "[28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. "}, "hash": "50d46c5a5db608e250005682639a5c99a561ff001bde1c0162f02a5952af1ed3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "50bffb3f-4038-4968-8849-57f4fb393859", "node_type": "1", "metadata": {"window": "Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n", "original_text": "[29] Ozan Sener and Silvio Savarese. "}, "hash": "81f728cf4475c2088f614289a4d3118872b1fd4140b25215e5a7c38ff68cc8ea", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n", "mimetype": "text/plain", "start_char_idx": 62889, "end_char_idx": 62999, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "50bffb3f-4038-4968-8849-57f4fb393859": {"__data__": {"id_": "50bffb3f-4038-4968-8849-57f4fb393859", "embedding": null, "metadata": {"window": "Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n", "original_text": "[29] Ozan Sener and Silvio Savarese. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "b29643f4-3f54-4505-aa74-e439bfb13dff", "node_type": "1", "metadata": {"window": "[27] Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan,\nWeijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V . Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. ", "original_text": "Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n"}, "hash": "29b8502144c168bd1c7eb86c4fb08b0991bd8c30d5efa7b2492b70037277987c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "5c8d75b1-16ec-43cc-bf6a-66f7ff571d77", "node_type": "1", "metadata": {"window": "Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. ", "original_text": "Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n"}, "hash": "b16bc3f6f45daea6ec5e04061215c3675f9338aa6739bea7bb084ff2358a186c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[29] Ozan Sener and Silvio Savarese. ", "mimetype": "text/plain", "start_char_idx": 62999, "end_char_idx": 63036, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "5c8d75b1-16ec-43cc-bf6a-66f7ff571d77": {"__data__": {"id_": "5c8d75b1-16ec-43cc-bf6a-66f7ff571d77", "embedding": null, "metadata": {"window": "Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. 
", "original_text": "Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "50bffb3f-4038-4968-8849-57f4fb393859", "node_type": "1", "metadata": {"window": "Le, and Hartwig\nAdam. Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n", "original_text": "[29] Ozan Sener and Silvio Savarese. "}, "hash": "81f728cf4475c2088f614289a4d3118872b1fd4140b25215e5a7c38ff68cc8ea", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0f14e350-6445-4b5f-a34f-55f1535b0a61", "node_type": "1", "metadata": {"window": "[28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n", "original_text": "[30] Mingfei Wu, Chen Li, and Zehuan Yao. "}, "hash": "eeb39fa196153e857370c38936765ece792a44c894179158d2f232895c6e6e37", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n", "mimetype": "text/plain", "start_char_idx": 63036, "end_char_idx": 63114, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0f14e350-6445-4b5f-a34f-55f1535b0a61": {"__data__": {"id_": "0f14e350-6445-4b5f-a34f-55f1535b0a61", "embedding": null, "metadata": {"window": "[28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n", "original_text": "[30] Mingfei Wu, Chen Li, and Zehuan Yao. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "5c8d75b1-16ec-43cc-bf6a-66f7ff571d77", "node_type": "1", "metadata": {"window": "Searching for mobilenetv3, 2019.\n [28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. ", "original_text": "Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n"}, "hash": "b16bc3f6f45daea6ec5e04061215c3675f9338aa6739bea7bb084ff2358a186c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9eeb9fe8-f453-4370-b6f3-34bf5ae9ddee", "node_type": "1", "metadata": {"window": "Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. ", "original_text": "Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. "}, "hash": "2eb0d89705e16b3d40b509453eb0c2e5ca72c9dedfb19a813f9e980bd59e152d", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[30] Mingfei Wu, Chen Li, and Zehuan Yao. ", "mimetype": "text/plain", "start_char_idx": 63114, "end_char_idx": 63156, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9eeb9fe8-f453-4370-b6f3-34bf5ae9ddee": {"__data__": {"id_": "9eeb9fe8-f453-4370-b6f3-34bf5ae9ddee", "embedding": null, "metadata": {"window": "Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. ", "original_text": "Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0f14e350-6445-4b5f-a34f-55f1535b0a61", "node_type": "1", "metadata": {"window": "[28] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n", "original_text": "[30] Mingfei Wu, Chen Li, and Zehuan Yao. "}, "hash": "eeb39fa196153e857370c38936765ece792a44c894179158d2f232895c6e6e37", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "15efe044-f628-44a2-9ebc-edd428b40cd3", "node_type": "1", "metadata": {"window": "[29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n", "original_text": "Applied Sciences, 12(16), 2022.\n"}, "hash": "aac2ce2d27a37eb7b3f04a88a215cd73dfcc680c6a83094f3cdf355120b8388a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. ", "mimetype": "text/plain", "start_char_idx": 63156, "end_char_idx": 63249, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "15efe044-f628-44a2-9ebc-edd428b40cd3": {"__data__": {"id_": "15efe044-f628-44a2-9ebc-edd428b40cd3", "embedding": null, "metadata": {"window": "[29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. 
Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n", "original_text": "Applied Sciences, 12(16), 2022.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9eeb9fe8-f453-4370-b6f3-34bf5ae9ddee", "node_type": "1", "metadata": {"window": "Bridging the gap\nbetween anchor-based and anchor-free detection via adaptive training sample selection,\n2020.\n [29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. ", "original_text": "Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. "}, "hash": "2eb0d89705e16b3d40b509453eb0c2e5ca72c9dedfb19a813f9e980bd59e152d", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9486a348-5e69-4f3b-bdd6-9d03f4e3c688", "node_type": "1", "metadata": {"window": "Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. ", "original_text": "[31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. "}, "hash": "38335e7b1088191fc1d880da0d3e01c07baea3ab1b71660f1552bc5f1f2037e3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Applied Sciences, 12(16), 2022.\n", "mimetype": "text/plain", "start_char_idx": 63249, "end_char_idx": 63281, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9486a348-5e69-4f3b-bdd6-9d03f4e3c688": {"__data__": {"id_": "9486a348-5e69-4f3b-bdd6-9d03f4e3c688", "embedding": null, "metadata": {"window": "Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. ", "original_text": "[31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "15efe044-f628-44a2-9ebc-edd428b40cd3", "node_type": "1", "metadata": {"window": "[29] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n", "original_text": "Applied Sciences, 12(16), 2022.\n"}, "hash": "aac2ce2d27a37eb7b3f04a88a215cd73dfcc680c6a83094f3cdf355120b8388a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0ca63949-82e1-4217-bd3f-6e5bb73e27fd", "node_type": "1", "metadata": {"window": "[30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n", "original_text": "Epistemic uncertainty sam-\npling, 2019.\n"}, "hash": "894c4cd6eef12d75c016ea40efe9a23390800a12edb8d80efecfe96e5b99152a", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. ", "mimetype": "text/plain", "start_char_idx": 63281, "end_char_idx": 63345, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0ca63949-82e1-4217-bd3f-6e5bb73e27fd": {"__data__": {"id_": "0ca63949-82e1-4217-bd3f-6e5bb73e27fd", "embedding": null, "metadata": {"window": "[30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. 
Deep bayesian active learning with\nimage data, 2017.\n", "original_text": "Epistemic uncertainty sam-\npling, 2019.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "9486a348-5e69-4f3b-bdd6-9d03f4e3c688", "node_type": "1", "metadata": {"window": "Active learning for convolutional neural networks: A\ncore-set approach, 2017.\n [30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. ", "original_text": "[31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. "}, "hash": "38335e7b1088191fc1d880da0d3e01c07baea3ab1b71660f1552bc5f1f2037e3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e2c38443-be9a-4f29-8991-640546120884", "node_type": "1", "metadata": {"window": "Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. ", "original_text": "[32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. "}, "hash": "1beb605f8ab4cf0bbbc574fb83d5fde41ff129c19415c9726c3df48e425f7f6c", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Epistemic uncertainty sam-\npling, 2019.\n", "mimetype": "text/plain", "start_char_idx": 63345, "end_char_idx": 63385, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e2c38443-be9a-4f29-8991-640546120884": {"__data__": {"id_": "e2c38443-be9a-4f29-8991-640546120884", "embedding": null, "metadata": {"window": "Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. ", "original_text": "[32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0ca63949-82e1-4217-bd3f-6e5bb73e27fd", "node_type": "1", "metadata": {"window": "[30] Mingfei Wu, Chen Li, and Zehuan Yao. Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n", "original_text": "Epistemic uncertainty sam-\npling, 2019.\n"}, "hash": "894c4cd6eef12d75c016ea40efe9a23390800a12edb8d80efecfe96e5b99152a", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "427a174b-7c18-44cd-8b6a-8a994bc62455", "node_type": "1", "metadata": {"window": "Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. ", "original_text": "Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n"}, "hash": "6e78d86d5e773508e9b5b76b78ea3f0ad0f353744dafda66cc921fa8d17a5ef4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. ", "mimetype": "text/plain", "start_char_idx": 63385, "end_char_idx": 63456, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "427a174b-7c18-44cd-8b6a-8a994bc62455": {"__data__": {"id_": "427a174b-7c18-44cd-8b6a-8a994bc62455", "embedding": null, "metadata": {"window": "Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. 
", "original_text": "Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e2c38443-be9a-4f29-8991-640546120884", "node_type": "1", "metadata": {"window": "Deep active learning for computer vision tasks:\nMethodologies, applications, and challenges. Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. ", "original_text": "[32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. "}, "hash": "1beb605f8ab4cf0bbbc574fb83d5fde41ff129c19415c9726c3df48e425f7f6c", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "e241febe-6412-48c5-8a16-5e44d034c987", "node_type": "1", "metadata": {"window": "[31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n", "original_text": "[33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. "}, "hash": "bd170b2f5827830670629176b273243d906980f3d80f7e491e765d7843e92553", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n", "mimetype": "text/plain", "start_char_idx": 63456, "end_char_idx": 63538, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "e241febe-6412-48c5-8a16-5e44d034c987": {"__data__": {"id_": "e241febe-6412-48c5-8a16-5e44d034c987", "embedding": null, "metadata": {"window": "[31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n", "original_text": "[33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "427a174b-7c18-44cd-8b6a-8a994bc62455", "node_type": "1", "metadata": {"window": "Applied Sciences, 12(16), 2022.\n [31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. ", "original_text": "Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n"}, "hash": "6e78d86d5e773508e9b5b76b78ea3f0ad0f353744dafda66cc921fa8d17a5ef4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "436ad187-cf6a-4df3-8999-fe7889f88800", "node_type": "1", "metadata": {"window": "Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. ", "original_text": "Deep bayesian active learning with\nimage data, 2017.\n"}, "hash": "bb36842f01ab57ce8616c2ca5d76efe229c7afb275b7ec6bb89067488974bff5", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. ", "mimetype": "text/plain", "start_char_idx": 63538, "end_char_idx": 63592, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "436ad187-cf6a-4df3-8999-fe7889f88800": {"__data__": {"id_": "436ad187-cf6a-4df3-8999-fe7889f88800", "embedding": null, "metadata": {"window": "Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. 
", "original_text": "Deep bayesian active learning with\nimage data, 2017.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "e241febe-6412-48c5-8a16-5e44d034c987", "node_type": "1", "metadata": {"window": "[31] Vu-Linh Nguyen, S\u00e9bastien Destercke, and Eyke H\u00fcllermeier. Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n", "original_text": "[33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. "}, "hash": "bd170b2f5827830670629176b273243d906980f3d80f7e491e765d7843e92553", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d83bf9c5-91f3-4873-bd79-797f72f16f13", "node_type": "1", "metadata": {"window": "[32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n", "original_text": "[34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. "}, "hash": "ae83ec91b5289353228cd780455e0b6ce3bb54f8683bd6d7764af4a8881030d3", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Deep bayesian active learning with\nimage data, 2017.\n", "mimetype": "text/plain", "start_char_idx": 63592, "end_char_idx": 63645, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d83bf9c5-91f3-4873-bd79-797f72f16f13": {"__data__": {"id_": "d83bf9c5-91f3-4873-bd79-797f72f16f13", "embedding": null, "metadata": {"window": "[32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n", "original_text": "[34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. 
Kohler. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "436ad187-cf6a-4df3-8999-fe7889f88800", "node_type": "1", "metadata": {"window": "Epistemic uncertainty sam-\npling, 2019.\n [32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. ", "original_text": "Deep bayesian active learning with\nimage data, 2017.\n"}, "hash": "bb36842f01ab57ce8616c2ca5d76efe229c7afb275b7ec6bb89067488974bff5", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "0cf124c4-29d6-4f97-8a63-012168912766", "node_type": "1", "metadata": {"window": "Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. ", "original_text": "The power\nof ensembles for active learning in image classification. "}, "hash": "38c14ca19d658f5c2c1961a619e5a0a03c5ca22ae3e8b92391c04f888510c143", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. ", "mimetype": "text/plain", "start_char_idx": 63645, "end_char_idx": 63722, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "0cf124c4-29d6-4f97-8a63-012168912766": {"__data__": {"id_": "0cf124c4-29d6-4f97-8a63-012168912766", "embedding": null, "metadata": {"window": "Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. ", "original_text": "The power\nof ensembles for active learning in image classification. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d83bf9c5-91f3-4873-bd79-797f72f16f13", "node_type": "1", "metadata": {"window": "[32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n", "original_text": "[34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. "}, "hash": "ae83ec91b5289353228cd780455e0b6ce3bb54f8683bd6d7764af4a8881030d3", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "36d0f6d8-2f4f-4809-ab3a-bdab1fdb215c", "node_type": "1", "metadata": {"window": "[33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n", "original_text": "In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n"}, "hash": "f0cbad1a7888b0fe03b8064532e120078595eb7d2df267601b4b7cf4a7176282", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "The power\nof ensembles for active learning in image classification. ", "mimetype": "text/plain", "start_char_idx": 63722, "end_char_idx": 63790, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "36d0f6d8-2f4f-4809-ab3a-bdab1fdb215c": {"__data__": {"id_": "36d0f6d8-2f4f-4809-ab3a-bdab1fdb215c", "embedding": null, "metadata": {"window": "[33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. 
Training data\nsubset search with ensemble active learning, 2020.\n", "original_text": "In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "0cf124c4-29d6-4f97-8a63-012168912766", "node_type": "1", "metadata": {"window": "Simple and scalable\npredictive uncertainty estimation using deep ensembles, 2017.\n [33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. ", "original_text": "The power\nof ensembles for active learning in image classification. "}, "hash": "38c14ca19d658f5c2c1961a619e5a0a03c5ca22ae3e8b92391c04f888510c143", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "aa12d7ec-b1ef-4b86-83e2-74e52e6daddf", "node_type": "1", "metadata": {"window": "Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "[35] Remus Pop and Patric Fulop. "}, "hash": "8f94838941ddd6b2e41a3db74bf4b7d74bf3f824fa2dcc53327d0ceed7f60b31", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n", "mimetype": "text/plain", "start_char_idx": 63790, "end_char_idx": 63885, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "aa12d7ec-b1ef-4b86-83e2-74e52e6daddf": {"__data__": {"id_": "aa12d7ec-b1ef-4b86-83e2-74e52e6daddf", "embedding": null, "metadata": {"window": "Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. 
Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "[35] Remus Pop and Patric Fulop. "}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "36d0f6d8-2f4f-4809-ab3a-bdab1fdb215c", "node_type": "1", "metadata": {"window": "[33] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n", "original_text": "In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n"}, "hash": "f0cbad1a7888b0fe03b8064532e120078595eb7d2df267601b4b7cf4a7176282", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "d385fdd2-c297-4c6c-ad3e-72160865aca6", "node_type": "1", "metadata": {"window": "[34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n"}, "hash": "6e569bd58a5f0b9ed0f7b96106e8f77d4d6d57b97a938addd33f6013a86008e1", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[35] Remus Pop and Patric Fulop. ", "mimetype": "text/plain", "start_char_idx": 63885, "end_char_idx": 63918, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "d385fdd2-c297-4c6c-ad3e-72160865aca6": {"__data__": {"id_": "d385fdd2-c297-4c6c-ad3e-72160865aca6", "embedding": null, "metadata": {"window": "[34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. 
Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "aa12d7ec-b1ef-4b86-83e2-74e52e6daddf", "node_type": "1", "metadata": {"window": "Deep bayesian active learning with\nimage data, 2017.\n [34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "[35] Remus Pop and Patric Fulop. "}, "hash": "8f94838941ddd6b2e41a3db74bf4b7d74bf3f824fa2dcc53327d0ceed7f60b31", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "2ea28564-d0f1-4c23-a5c8-59e6384fdbac", "node_type": "1", "metadata": {"window": "The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "[36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. "}, "hash": "7202705a798979deed64a9341763c8210c5da8a5c33df54707a30e4a606630a4", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n", "mimetype": "text/plain", "start_char_idx": 63918, "end_char_idx": 64038, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "2ea28564-d0f1-4c23-a5c8-59e6384fdbac": {"__data__": {"id_": "2ea28564-d0f1-4c23-a5c8-59e6384fdbac", "embedding": null, "metadata": {"window": "The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "[36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. 
"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "d385fdd2-c297-4c6c-ad3e-72160865aca6", "node_type": "1", "metadata": {"window": "[34] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power\nof ensembles for active learning in image classification. In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n"}, "hash": "6e569bd58a5f0b9ed0f7b96106e8f77d4d6d57b97a938addd33f6013a86008e1", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "7620fdfe-7417-4199-94f6-2cda47a57c32", "node_type": "1", "metadata": {"window": "In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "Training data\nsubset search with ensemble active learning, 2020.\n"}, "hash": "37d648d0f5f6c94e179bfcdf7ee6f75f85c0dc555b83a023dd7f1c0119864a65", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "[36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. ", "mimetype": "text/plain", "start_char_idx": 64038, "end_char_idx": 64114, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "7620fdfe-7417-4199-94f6-2cda47a57c32": {"__data__": {"id_": "7620fdfe-7417-4199-94f6-2cda47a57c32", "embedding": null, "metadata": {"window": "In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "Training data\nsubset search with ensemble active learning, 2020.\n"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "2ea28564-d0f1-4c23-a5c8-59e6384fdbac", "node_type": "1", "metadata": {"window": "The power\nof ensembles for active learning in image classification. 
In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "[36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. "}, "hash": "7202705a798979deed64a9341763c8210c5da8a5c33df54707a30e4a606630a4", "class_name": "RelatedNodeInfo"}, "3": {"node_id": "9f92600d-e49e-488b-89da-ae0f1e80d943", "node_type": "1", "metadata": {"window": "[35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "33"}, "hash": "02399516160020f38beeae50018994b2d1e87c6a904be3041a98436d419a237f", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "Training data\nsubset search with ensemble active learning, 2020.\n", "mimetype": "text/plain", "start_char_idx": 64114, "end_char_idx": 64179, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}, "9f92600d-e49e-488b-89da-ae0f1e80d943": {"__data__": {"id_": "9f92600d-e49e-488b-89da-ae0f1e80d943", "embedding": null, "metadata": {"window": "[35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "33"}, "excluded_embed_metadata_keys": ["window", "original_text"], "excluded_llm_metadata_keys": ["window", "original_text"], "relationships": {"1": {"node_id": "89dabcdf-f292-43c7-abfd-eb0cc1cd36ef", "node_type": "4", "metadata": {}, "hash": "4e7f38b7a69862f402a9fa1889c2da69957c3b4abcf4edf04527c9853e580b3a", "class_name": "RelatedNodeInfo"}, "2": {"node_id": "7620fdfe-7417-4199-94f6-2cda47a57c32", "node_type": "1", "metadata": {"window": "In 2018 IEEE/CVF Conference on\nComputer Vision and Pattern Recognition, pages 9368\u20139377, 2018.\n [35] Remus Pop and Patric Fulop. Deep ensemble bayesian active learning : Addressing the\nmode collapse issue in monte carlo dropout via ensembles, 2018.\n [36] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. 
Training data\nsubset search with ensemble active learning, 2020.\n 33", "original_text": "Training data\nsubset search with ensemble active learning, 2020.\n"}, "hash": "37d648d0f5f6c94e179bfcdf7ee6f75f85c0dc555b83a023dd7f1c0119864a65", "class_name": "RelatedNodeInfo"}}, "metadata_template": "{key}: {value}", "metadata_separator": "\n", "text": "33", "mimetype": "text/plain", "start_char_idx": 43188, "end_char_idx": 43190, "metadata_seperator": "\n", "text_template": "{metadata_str}\n\n{content}", "class_name": "TextNode"}, "__type__": "1"}}, "docstore/ref_doc_info": {"89dabcdf-f292-43c7-abfd-eb0cc1cd36ef": {"node_ids": ["aabd4e55-6fdd-4c8b-a0d3-a15f80ae25a2", "bab022a3-166f-40d5-878c-8884ec28a68e", "67c22b47-4006-4279-84ac-8ed8a95033c3", "f031e3b4-2b2c-4f36-994a-c6061ff9dfa9", "cccfb5cd-b1bf-48c4-bb6e-768bcc0e2a35", "dd0fddcb-cf59-4229-9761-07fe375dcf93", "978fa62b-6aa1-406e-9b9f-aeeae892855e", "a4081bcd-845c-4af3-8367-4566f59e84ea", "2bcb37eb-15d9-45b0-90a4-9839a4e03171", "d0cd9d4f-ac2b-4369-b4fb-2364d6f6443f", "783934d5-5c03-4aa3-91df-0511ee3fe295", "f1537c3f-b20d-4906-9223-ba4db80d26ea", "2ec11b97-ca4a-4f04-af28-a28a973588ad", "70a3da41-6cec-4681-ba3c-49e2435cf197", "e65f4f16-a9cb-4834-83a9-c535d8c4a8f4", "dda1c6a3-28f9-4bda-b62f-dff4b810185d", "d6392f76-98df-4a4f-8c57-87fc17bc43d4", "101ce625-0317-4b48-9c7d-ffa14b7ff2e4", "612e34a5-f745-45ef-a91e-61b5d08ab91f", "d896c04e-1e3b-43d7-9126-c4022852cae6", "0d478908-16c9-4bee-a1f6-a0b09a03e809", "caa8c289-2580-416c-820e-ca38387fb3f3", "2f207ee1-2319-43b0-8519-dc7a066d9635", "2082ffbc-50a6-4245-869e-13f6cd05b84e", "507c7436-6017-4528-a1d7-2704c59e25f7", "98b3c0f2-3971-411e-b1ba-debf0b43e083", "878cc548-b87a-44d0-a6db-215e183ce7c2", "18c60443-514d-4459-84d8-360fd8241f31", "cf6fb891-cac2-4509-87d1-65d0fad1ea5b", "764726e7-9acd-4594-b590-a42270feea79", "27c57ac9-1cb3-493b-9c02-f2d0b28855e4", "7322f358-1948-4f55-9aea-a451de512f2b", "1cd51d40-0a9b-4f77-8b36-da9f272044a3", "4458f71f-836e-41d1-b9f2-183e5878227d", "7dc34b2c-b910-4687-9c1a-94cf4247e83f", "94e5821e-8cb8-4bfa-b65c-bc7e6aecf565", "6b2b9f02-004d-49b1-ba1e-1aef1cf72c91", "aeaa56ff-0b5b-4c6d-937c-cd35745872f1", "59bf8388-9103-4ffa-a97d-a353069b2ca9", "e013a9c5-8dcf-458a-ad54-d8b12410470e", "5a46f5e9-219b-45d0-b79c-cce4a88153a1", "0212d949-c329-4c0e-b0f3-739a42833930", "fdaa9304-1361-4b1f-91eb-74fe75240fe9", "9f248ad9-d6d8-4c23-80e8-0be85c2512c1", "46e47b4f-618e-4a8b-bde3-d6e2db0f4434", "6904607e-ebd7-4584-b0e4-6901a6ded5c9", "189d3591-5eb0-4e8c-8083-15146d6f39c5", "2663141b-9f22-4456-bf58-ce45376a82ee", "f14aa0ab-e7f4-4c64-86a4-e51bc7cf6c55", "1d5e2f44-030c-4b16-9486-3821ba0f64f5", "72f7a9fe-d9b8-47df-8860-8821a6a795ce", "0d7e9c5b-69b3-40f2-bbac-65da55dc8f51", "1357de3a-3304-402b-9cc3-c41c92089f29", "38583da1-6a7c-459e-b2dd-6a3bb3f7f9b3", "a7ae8c96-7b8b-4d67-96b1-3fd93962b469", "b8f64b78-94dd-4340-a0ef-bcc6f67c20e1", "1fed8e1b-2011-4eae-9beb-b25bcb5396a7", "79b39b0b-c58d-4515-b58c-4501287cd5a6", "dbbb0fe6-8046-43e4-84bb-33efb81d9010", "a9e6fede-3839-4dcc-a97c-a31b3d738fe5", "6f49a1ce-a01d-42dd-9f32-ee1dcaec4d7f", "3c0dc0df-d398-402f-b60c-ea1b5eef7478", "babd73f5-1108-443d-9dd2-f9d573cb7337", "21752a6d-37e6-4e72-8467-7c3aca49ffbd", "c6de1561-d737-4afe-addc-f4c1df3896f7", "fe69ec8c-fe54-4cd6-8fd4-18a473c401c2", "4010c527-89ec-45f4-bc3b-0702fc15e693", "0b925135-db51-454a-abe8-4f1338dd016f", "5a16f20f-94d5-46c9-8fba-d2c5653c4625", "99084c8e-364e-4e73-b47a-be787bb2a107", "6de4b45f-3d4e-4981-be81-49cdd631f2f9", "ffec42bb-fed8-4d55-9de6-de0fdd96aa16", "17f35e00-841c-4ea7-8378-3f48fc651993", 
"4d24ae53-f918-4097-a23f-94c9be43125e", "e4072659-f32c-4460-9a09-8439bf6bdd70", "efd429eb-de49-44f3-8264-fd7b30873af8", "c69be104-16fd-4e86-8e3b-e9f02e15be3f", "2ca64856-cdc6-4999-9ecb-efaa18d55ec6", "fe5fcb31-e33f-4354-8bdc-aa94ac19d8c5", "ef907f66-da87-4b13-82c9-ab181afecbec", "9b0d6415-7740-493b-9a44-cd5a3aaf7d13", "740217d9-52ed-4e40-a368-2dd50cc01d8d", "ca795ca9-59ba-4997-912b-09ff52304745", "b7135b08-5e3e-4701-9d20-ce9468e826c7", "a159840a-c64e-4402-a930-2a5554fdb450", "d4a07581-a9e4-4ad1-8e89-a38aac6f079a", "75ccfd70-2249-4b3e-94d7-6839cd83c778", "08d2b254-bdde-4efd-8e7a-06f18cba1baa", "6c086bd0-2d3d-495e-bffa-33a55cdf36d9", "6acc0c95-d875-4613-8610-ef4015b413fd", "c45b8827-2c7b-4c4e-b08c-be963bcb4723", "de581595-f4e7-493a-9c05-a8464b0e794c", "bcbb8b0b-c9b5-4f44-a081-4efe37dad3aa", "aa9fcfd9-a11a-4dd5-89de-815d3b83b0d5", "d5a23980-dd6b-4bc4-9de9-bdb0a64d3018", "cdb23195-ff6a-4fa8-a7a5-c6a07d352dac", "bdfda74e-24bb-4e95-a143-6a8e3467a638", "aa58176d-ea29-406d-9bcf-5d34aa5dab89", "0acb8cb1-6e16-4f99-880f-cf6dbe5bc867", "834ed8e7-f596-4bbc-8560-54ecceb13371", "093c16ee-170f-49d2-be11-dd5e16e84aeb", "95e37b08-b3b6-4bb9-be1a-4c9351b77a3f", "a789d0e5-367f-4420-b723-f24130d3bf5c", "c4b036bb-f145-4977-87f1-ecb1c17a2d37", "1d5275b2-71a0-46cb-94e6-97dee041d00e", "6c1f3995-67f2-451c-8839-a55791bebc8f", "65faa0e3-e146-4ce5-88b5-cf22f879afe9", "b0412f5e-93aa-4f67-95a8-d482d39cbfd6", "9cc1d81c-4140-4313-9787-81ce77bf8ac1", "36363011-1120-43db-b3e3-fc0d1810b739", "6d196081-f067-4f9f-83a7-7fd7e918cc47", "a550f6e2-6e70-4320-acfb-d4878568553c", "9f0519f2-5e8d-4c0b-a0df-eef1ed9929ce", "47c57a76-4bc3-4dad-b25d-ed7523b85304", "e7357c55-81dc-4084-8c5d-8093fa6868b7", "6919eb0f-63de-4090-b6b8-129017b2ce6c", "e08d802d-4c3e-4599-9d52-f193741f319d", "bde27828-e6be-4770-ae2b-58763be28ddb", "ae6fbb47-6cb4-4d15-9cc0-51d5fa8a3cdc", "014641be-e7ab-42a3-92f2-0f1bd7038785", "ffce041c-2665-4d39-9521-d0544e331bb4", "5473394e-23a3-469a-b334-862600ccc619", "25c0446d-81ca-456b-80f1-4bce41d1b828", "f7d3365d-df08-4936-8522-e28c0ea54b5e", "d4caf388-9558-42d4-be52-07473b5c5666", "352da421-fb3a-4ae8-ab77-4b13552a425a", "84314055-0f03-4138-ac94-b0eeaa8d2413", "c20952de-7755-4ca1-b614-3f248af3bb85", "39e50d86-4c32-4a52-86ed-4ed50a98693f", "cff100f8-481a-4770-b9be-06c363f9e15d", "085a6f53-a455-4d21-ab9a-eec4eaf4a413", "254381f1-7218-48d4-b950-c02677074873", "f277422e-3e8f-4cfd-854e-23d65bb7ac1f", "700efc3f-8618-4459-8280-c2f12a0baeb3", "54beb635-5444-411e-a512-2fac95901cdd", "61ce1819-7253-4549-856e-eba686e56983", "c5d5e7fa-16be-4165-a8fc-d0d72b36a255", "40583b0d-9815-4e55-8c22-e088caf3ae16", "86445103-3997-42ca-bbc1-1cb2f6df273e", "e5d9e8f6-896f-4ac6-b283-957d8d25f1a7", "612dc8be-abd2-4f34-bba9-f37c461f9ebb", "d3b81710-74f6-40e8-b4c7-b85558a4cf5c", "45dbe881-b7d9-432c-8eeb-97c457995be4", "95569fe4-6b1a-4b8f-8645-6a1518ebf147", "81da4f34-e765-41ea-aff5-9bf225435cfe", "455116ac-4f68-4141-89b5-2eb940fccc32", "41f650f4-924c-4cf9-981e-77834d0df780", "9546716f-ed71-4f4c-9a1e-235940d6c249", "57f7a476-d8f8-468f-97f7-c9f6cce97cb9", "0dae5e39-ab6e-4e64-b754-288903aa7a4c", "b2a13496-06f2-4a06-b9fa-ed9391a16b12", "66ac48b3-0183-4cf8-a9af-0864cb6b82f0", "a52eb297-77a0-4ef1-b3fb-f93ebac7e60d", "7fdc2a59-9008-44f7-991c-3f8b53b26a96", "364d956b-896a-45c7-8286-bdbfafcd59d2", "c3ca74d7-b4c5-4a14-9040-c12dd722777e", "a7f369c1-654c-4aac-b1f7-80cabff1be14", "690662df-c4ee-4adb-82a3-ec607bf3a4e4", "08f4e80d-38a6-4eb3-92a5-f3e1f31a7bdf", "182c9022-0a66-4a54-a0b1-896409b156f1", "e3e51562-d148-440a-b717-70541ab2a372", 
"d063a6f3-7844-4591-80af-72014570c8ff", "7fb0839c-bd58-41ba-862b-a88239c304b8", "e7405003-dec5-48d0-923c-b11dddcb8c17", "62e4511f-14dd-4909-8f85-ce7e46749849", "aec8b991-d807-4438-801a-4a1eab70ceb4", "faa6f58d-1705-4523-95d6-f9cd2d181d5a", "186ae934-b2fd-46ec-9f8c-bf30af97f134", "096db7f6-33fa-4ee9-8ffa-90eb58bb3293", "6176aafa-9309-49f9-93df-634e6f34ab8f", "07285404-98ce-435b-9069-053dea5eecb0", "700b820d-7a5e-48e1-808a-0ab0aac3e1e6", "cf7ca23f-6838-4ff1-abab-dd443b84da37", "51cc3f3b-2a9d-4fbc-abd7-8ed44ff32c77", "03c5c698-303d-4a5d-ab8e-323dcd3829d2", "f88f8fa4-f946-4e76-a3dc-fe22584eb647", "a3f13bc9-118d-4a26-b51b-26386fde6fa6", "6abf387b-a0d9-4a24-91b0-5036cc14c805", "6495d3fc-101d-4694-905b-5575e3fbb2b7", "c4fb9d84-e7a6-4466-b211-3280cc9cc0df", "42895f62-3e8d-4395-9264-1f3786934b7a", "23b9910e-3887-440a-b3ba-9c05b5f1d71b", "1062cc1e-28ed-4b37-8465-960e9c7316d7", "af96e777-8e6c-43a6-8a44-9c9fa7e3fb4c", "3b787eaf-97de-457c-a1f3-166f640a7416", "432ca6c3-1af0-4b51-bc4a-79781cce183a", "3488e2e5-1638-4be5-b066-5b50629fd121", "df97ac8c-1bd5-48af-82d9-0b2acea45d95", "e213a7d8-d437-440e-ad01-60172f77d1ff", "d89b7b29-9b71-4a15-a004-6e5ab6242af3", "42616817-c368-418b-b4c3-f67435396f9b", "cec812b8-07bd-474c-a35c-e2a17a85970d", "4588df1e-9e69-490c-b75e-c136b3d73229", "219cd3af-167d-405e-adf2-86ec7dace7ac", "35e9a18d-e69a-4e00-a2a5-658b1afa2e07", "cc6683c9-d7c2-43ff-a762-e345d7ab280f", "14f92f7b-213b-4925-a0c1-750522b2d0cf", "ca9658ae-d6d5-4b79-8baa-445d8c194af2", "97afdf3a-8f6b-4bba-9def-2e12251908bf", "989d3fd2-3a32-4df3-863a-aff0d9e6d5c4", "69af4999-2384-4e5b-a2ed-f8a50723499e", "59d365ed-06ab-4c1a-96d6-887862ad2c62", "3aff52a9-0551-48c7-a643-57cbd40168b0", "93ac8471-95ef-49c9-8d81-d35fac819a2b", "b8e0698a-9c55-475b-af11-efd14e30a941", "880d5c0d-a4a2-45ff-b26a-e9d19d12a055", "794d6a0c-089a-4a9f-8170-29c20b3ec300", "ecfdef26-b97e-4ad0-bcbc-f8ee5d4a4970", "811e812b-53e4-48c5-85f5-ad9c130b1f5b", "757b2a07-cb08-43e4-b855-915e793cbc7d", "1f81c1a7-c32d-49b4-be38-48559bae2c10", "ab2cdb79-4e87-4bb0-84b2-3f6af4bf1959", "3b6630a7-b7cf-4ac8-b0a0-aefe1c4838e1", "fe632d2e-75ac-4dcf-b9ce-c2112e0d96b1", "00404da5-017b-45e1-9395-ce4bebd3c9e8", "a25272fc-ad0b-4ee8-98f4-d95344e83cca", "fd4e9c62-9120-4464-8fb6-bcf368f5e8d0", "955c63f1-3690-4d8e-85a1-b2be77dc98ce", "babc6b05-6b92-4cec-aa10-fd20dc009ec9", "53cba9d9-7973-4bc8-9080-f2510ef7bc36", "c9c3e7ff-2e2c-4db4-bfeb-cd7a787119ec", "9e0bbc2d-6193-48cd-a07c-80474253322a", "3f194125-734d-46f4-a9fd-ddd29f5e2183", "be1ce379-c349-40ad-afdc-71a7936e60c6", "c3296cd0-6365-4c5b-a6d0-411e8c3d0577", "58aae4e0-a962-4b38-9a6e-a687ec00f30a", "33fb6106-4d90-4c8e-a437-8a357e114dbd", "616c86b6-7abc-4204-aaba-a811e22464f7", "29cd9cd4-b945-4b31-a61b-a81974170bbf", "2aa81b72-9837-4a28-89c4-7263550df613", "4d89b50a-b72f-4cb0-87d8-8cf7b505ac9e", "4fba19b2-0cce-4256-9f0c-99191571e2d1", "0634617f-9e21-4686-959c-80dd984f5d11", "ff12285a-3231-40f9-99be-c91d13278f97", "29c922cc-48a1-416a-966f-a3ffd0edeee5", "35072137-7803-4223-9b54-60db59633a2f", "49faee0a-80fe-4fc7-a2be-8e51ff8d268c", "2f5a8a63-1257-4665-9df1-b62bf40f4cd4", "d55b5eff-093b-4db5-87ac-0becb7be3b3f", "c50c2988-5b65-4178-a57c-59a678975f91", "5986e111-543e-41f6-b606-acd521d4d469", "95604239-1db6-4289-8b4b-157830379b87", "67666908-fe9d-4d3e-a628-b012f49263ad", "a0efd862-3924-4f9e-bb6e-1fa824ad1fe9", "351bfba7-c40f-4b24-936c-695c8922400b", "7cf7d9a4-6f59-47a5-9e0b-8101ec6ea48a", "912e3efe-ce86-4e91-9af8-3699b294517a", "d9c1f9ec-ab20-4a1d-b35b-312c5299601f", "e59e4445-a4bb-4179-bb7a-9137b7d7f70d", 
"942783dd-0d25-47fb-82fe-d40113812f52", "7c0d2760-f3e6-4bf8-9e60-0c29d165c69a", "bcbea5b5-fef2-40cf-acb3-84e513365e95", "ab830d4e-5aee-40e7-9563-705aa609ee32", "4f9e84de-6297-417d-91fa-dd284764cb8c", "13557eac-bdb8-4e98-b6c9-6e1502fd8977", "2d4a2448-bd3f-47ef-8120-61f108a3931d", "515f35bc-6805-4539-8dd6-c0bc8442c7df", "7d3b68a9-9684-45bd-b513-5355cc342493", "0df8094a-0fc9-46d2-bde9-db17da7e0798", "d200200a-c5dc-4197-a791-607008c88d14", "ffd57124-4e66-446b-930a-d1498b742362", "69f8add1-0f66-4bd8-9706-b9f8bac2b648", "66d9162f-b0df-4d11-b4e6-232647daa677", "155ce6f9-70b8-4b9d-b137-36b9951b8036", "b576c41e-e76f-46ca-b572-35158a29b8bf", "d6a25275-5b62-492f-85af-622d4d555dd2", "2a75dea0-7dbe-4161-ae51-b5fe341a666b", "5aa5aaee-7a64-49a8-a060-ede2373823a3", "ee0bec49-a86a-4348-888b-79d6fce5b11c", "e4740a38-96fa-47ae-a521-176c04c48be9", "5cdf0da4-1529-4a44-b8f2-c5d9ebf5f0fc", "869f1ddf-9fc3-4f1f-aa3e-9b811531a093", "d2ea7cc0-8426-4708-af74-94d178188962", "1e745c01-a8ca-49ee-a307-aca3a88eeb62", "9055b746-7c49-483c-ac7d-1cbeedae37b7", "e3b851b9-8e1d-49e8-8bb8-9eaace3b36a3", "7006d86c-2845-4db9-8817-df0a7e5bb69f", "f91eafaa-5a9f-466a-bc47-5f0ac4a3eb45", "2036e1f9-6e05-4fda-a8c8-d447badce37f", "3ae182f1-c658-4a98-8cb9-ad409c89333a", "8fead42c-fede-4032-a21c-e015976afe68", "c9a603ef-68e8-4ae4-8b90-52c5af768453", "ae816606-c3a5-4306-831e-946db4952968", "7d4cd77c-5acd-4d9d-a5c0-095110f05de4", "4b5f0d66-e1d2-4ed6-a7f6-76c7baf52b09", "ae428e09-e434-4705-9982-207ed1ad74e6", "6e221fb0-a26a-4695-a84a-823b7f3e46e2", "d4fec36c-0dab-415b-97dc-9648b7597a47", "891dd5e8-bcb7-440d-9f34-c50f05fe5b8e", "cdd9e12e-4f6b-4580-96e6-076a5400dd7c", "2f6fe182-a7b5-42ab-8590-08f808cce910", "ce21bb77-3b5c-48a3-a0e7-bf3e91b62861", "7be05165-dc5c-495c-bdad-233b71645d77", "415501cc-5c85-41d4-b498-139b1e5b59ea", "dc0f2644-9886-4bd3-b613-4bf23c540d67", "1ec46459-5948-429c-90a7-bba74d930e53", "839a4b9c-5a25-4c58-9b36-1cda8b399b23", "05eb5f60-62d6-4053-ad8e-a59f7feccfa5", "a6439e08-9f9f-4a55-9322-a745e30da0e0", "dce9210d-fb37-4476-b0b2-78c6f20831bc", "fedc3f76-f24c-488a-8d16-f1d72ab3c4fb", "255f44fe-4ddb-42d6-af44-4ee3e484b8e2", "110eeee4-be40-488e-9482-bd4d6d425399", "dfb31c8e-9ed6-4981-b84b-e61e33472f5d", "4967230d-0a80-49a7-b7be-0dce5a9df235", "2bb79a49-347f-46fa-97bd-a7665ba268a4", "3614c960-f31a-41c0-822a-58b78a90fa8b", "96f5db1d-6112-4a0f-8d59-bf99cea35d8a", "3256e618-3a73-483f-9a7f-42dd04087711", "4b8852e3-782a-45cf-b088-58a1b924c43f", "6d367467-67f1-4367-9aeb-564f16645f66", "29bd39e9-f988-4835-b452-655d140b8d61", "4a03c3f0-95a6-4e06-856f-21c6a5ff98a7", "4c2dae3b-4994-488a-a8c4-dfc64b97ff0c", "5fdd60bb-42b4-4a39-a711-8beccccf373f", "979e6a6b-efea-41ca-a7c9-9668f04fbcdd", "a90ad3d1-bc04-4489-a049-5c713d52d173", "c3b62386-aba7-4f94-9000-7f1a28d2ebab", "e5ea3efe-cc55-4008-a4bf-bd0dbb048e94", "f1af395a-2de9-4ead-8640-5d061341b348", "1cab2a1b-a166-4690-b494-ec86a00e79c8", "c0e635e7-5e5a-4090-9806-04f1987465a4", "f00071e5-7b83-466a-8454-e7d253f262dc", "9d1ab84e-9be0-459c-8305-c9353a13e749", "403511af-cbfe-4c0f-948d-6a9ecf7ae6ec", "9aca5a0d-bfa0-4caa-887c-829d210fc9e4", "a093a811-9296-40fd-ae21-bb64d1c3926a", "3da0aa0a-6c63-496b-8464-917ae9813f4d", "3ae4bc93-ad47-47fe-9487-34b3926c9cc1", "b91fd9b6-be6b-403e-a1ae-e2ff2b383e0a", "53f66baa-e165-4c00-9798-78b2af372b1f", "af635190-17ea-4bea-9a09-53fc3fa5cde0", "21c4379f-21a1-49d0-91b0-d3bba46ea2f1", "81137b75-4255-4120-bb72-1ae7efa1325b", "9c157a5b-ebcb-4af1-b248-ec80a7ce3ba4", "d46f79da-9279-46dc-9a5f-f5a3deec2845", "730acaec-9cd5-438d-83a3-906f6fac6c1a", 
"1f260bbc-837b-446f-906d-401583250bb8", "6c0c9484-6d53-456e-a2c2-36be82b612f2", "f88cf895-0e45-4513-97d7-bf3006ec340a", "b93cc96d-a274-4302-9907-e1a8740a8550", "496a8931-174b-4ec1-b4d0-8240115aa89c", "7781f9a9-0ad8-430d-8953-c9a8dcd59017", "86db7322-ebb3-4d85-936c-bf9e304bd9ea", "6e08f6c5-1c1a-49b5-93db-fa0d798ff82c", "c8393f43-baa5-4c12-b51f-37ba84411a70", "bab49fba-279d-4ddf-99d1-6706521a71a5", "77280196-f248-4806-b532-4e77f0d5125b", "946226d0-b703-4cd6-a543-3ec9a1c0454d", "81a99583-c412-4cbf-b317-4575c17cff32", "f1dfcc71-ce5a-485d-95e1-c39f11ad469a", "09fb64f1-efb2-446a-9970-76715defe156", "94893e9c-a839-4894-a278-be34a33dbe47", "54d5a007-6cca-46c6-a9d6-1c883f484a9f", "25c44ae7-fdf8-4ce7-9e9b-e1a8dc7ee78d", "6cbc6819-3e97-4906-8796-87e467f514b9", "092ce679-7ef7-441d-aeb5-ce1821c9d7b1", "9edf7264-654d-4cee-9cda-9c5181aea82e", "9e0ad855-0f6f-4517-ba5a-6601e91c61c5", "a3b6b725-aff0-4f13-aef5-d15c79f568ba", "5129a96e-3298-46f8-af07-6aa5354359b7", "e20345d0-f37a-47b3-a84d-5870f8ed09eb", "4b889ce3-2f21-4aef-9699-6d9017f20a4a", "ac5edc31-3f0a-4263-b238-52ba25d2428f", "bade3cdc-cbad-496e-92bb-6aabafd99284", "0b30648f-be34-4884-8ea0-279f164999cb", "e54e6309-4eea-4571-ac63-383e7b7d0e6e", "9d380c2f-0971-4951-a89b-a02488b1c89a", "71fb7290-95e8-44d9-a3af-772e78899ad9", "7e25fb1b-ec22-43e9-8955-2848f47cc53a", "5ee6c5a4-35f2-4936-9314-9f197f4ba2d3", "fd845739-d1a4-4871-88ae-b7d5cdd0764c", "da92202f-20c9-4ce3-862f-27adfeccfafc", "07c81a94-1fcc-4c02-a931-6deffb9413b1", "28e282d4-d383-4ef2-9734-e105eab0a626", "355812e1-e2a6-4667-aa2e-98acdd554b75", "ecc06446-ed65-4244-ae9d-7d15416170c4", "eb4bbb3a-37db-413d-b122-5ff0f92dd0f2", "3ceebb5c-7a48-4865-ae1f-4aca7ffe04b0", "092dab60-215c-4a6b-a318-baa4a71cb6e6", "8b56582f-8d09-4ab2-bcac-a56d4e598d0e", "668b0c4b-a595-4f8c-a94b-7cb830568d45", "d9852ee0-ed1f-4fe0-be4e-226bfe2780b8", "149e944c-9220-457e-8aad-8d978e045fcb", "3129721a-98df-4fed-8867-8a80ed5acdcb", "bab2dbe0-29f6-4f3f-952c-82fd967d2183", "5aa5a4c6-3c47-42ca-bbc5-9834a733c697", "a1063a3a-0623-4ffb-9966-3a2e837d464d", "7da6c025-4ad1-4704-8ffd-47687a86d70c", "5d999c37-1884-43d5-98e8-af8f1cd7dc7e", "64fa903e-9660-4635-a981-d294f06a84d5", "d4d84da8-a760-4268-9aef-a65ec802e334", "b07c3f13-1d2b-4d31-ae22-c49fe559beea", "f7ce6b27-91aa-452e-996f-f57452dc85ab", "17871de6-4898-4e90-91ed-8033633283e8", "b349a5a0-326e-4005-b01c-255633ac3afc", "2930af96-d7f1-40fc-b07e-559ed43d60df", "6d86b3b8-ac8f-492e-8472-3fc387043a69", "13b3685c-15b4-4115-b0c9-f3dcbb9508f7", "fe681619-8376-432a-b85e-338bca89aad7", "22aed3c5-de6d-4318-8f0b-ccc5ca4b0dd6", "bc9eed1e-c16f-40c2-8a18-4e4725dac437", "d31bd6a6-a254-4ae3-9f8f-5073d707bdcd", "cf8aa5df-3520-47fd-b204-afb421cfc025", "14a35976-41ea-4c11-a2a4-43db46bf3f6a", "2d0ed9d0-6464-4d90-8711-abe0602cd051", "694a7c54-f39c-4995-9140-01b087727fea", "18dcadee-14c1-4814-bd7b-c294c45c679f", "a44f06a5-9aff-4e26-8d2b-99ce774131ca", "5d7573c8-6fde-4e3a-b49e-73d3cf1c3ee8", "45cac001-01a0-4da7-8f3d-60263e6f879b", "26da1a0d-8331-459f-91ff-1949035b46f8", "f14bc4b9-19b9-4a56-8b64-1d6bb977a0bb", "eb17ffe5-d1f6-4f6b-b55d-cdc2aa66bff7", "73afef95-0f70-4310-a611-ed3a2059ce0b", "1323a387-36d6-438f-b7d0-dfe2a2deed7d", "1f54059a-c723-4c42-96c3-2524ce7e09d0", "a1a1c3af-82f5-41bb-b6cd-d37020208ba1", "738e008e-920d-4eda-8be5-51b0e3023658", "a518875e-e461-4b06-87cf-29b517f4324c", "e2edc79f-58f2-4eb9-b2ce-a68cd8131bdd", "240e6559-3551-480f-845f-c231d7e880c3", "71d65038-5759-4e21-a35a-5812d4c33515", "ea9c5dfd-0ea6-415e-9b7c-9dcab087c833", "b7a40572-a6c4-4da5-9588-f6084f3300a1", 
"a8b55d47-4aa2-4b87-a158-ee5cb59645d0", "705a606f-907e-4ef8-98d2-6c2e0131b8af", "faabdd7c-f5ec-4d94-ace5-edaef19fdb6c", "0889bdef-1ead-4d33-9a3b-1e6c833481ec", "8c20904a-ed17-475d-a8bb-2c30ba333adc", "498efbbd-a7a3-4878-b748-fd8a566347f4", "5045030b-9dbc-43f0-9e82-2cb8315425d9", "92a4d7fc-1742-4762-b5cd-306554f29e1b", "1f3f0c93-7c01-40f0-a8e3-4837856ebdcd", "e02b1a27-c64a-4020-8a40-e3f42e544108", "376e6950-0ba6-4126-90f6-0efeea3846b7", "1a21dd03-afd3-478f-9fed-ec84bea64f17", "8bcfe243-3553-417f-ad9c-db3ff9c79752", "40ea12f3-392a-4db0-9196-31f2e75ac941", "611c51f7-fdf7-4315-95da-5695bd1832c0", "95c07636-ecaf-4d9c-8814-7665fd813129", "91038a72-7d16-4de9-a5cd-c188a75ac4c1", "2c5c9baa-8321-46ed-8347-753933ecc87e", "6d40ad33-f7aa-43a8-a87c-360b9fb55fbb", "74773f3d-aebc-40f7-9188-3b1aae82615e", "e6f4ab6f-6382-4a68-9385-24c615625fa7", "ef709998-c023-4d7e-9fbb-64bbc42a62b5", "42547948-7501-491a-9364-bbb7a01632d7", "c0a9226f-82c2-457b-bb4d-7dbdc124e13a", "51596bd6-9f3b-4986-ae08-0f2ba67eb203", "2e5d58b5-ca35-485b-969f-f73f725495ac", "9c7fb20d-7ece-4f12-8497-7c61878d52e6", "ce8eafae-149d-4fed-a046-d986dd294b08", "293bc8d5-1d1d-45ad-8b36-ad037f8a5dc1", "aa44127a-443c-4bb2-aa8d-9456a87183f8", "dff9eb92-be49-4a38-a041-75be8d101e4a", "7972a5ce-1d08-422b-83d3-3defc34635c4", "5cd26bac-e963-44d8-b266-0ecd93c8f5f9", "85726e01-c8bb-4b72-8133-f4322bb100b9", "92a0cb7e-7781-4f99-a9da-6e7d7234683c", "2fe10dfc-4edc-445e-8275-a0d4fd0b3e5b", "07fcf5b7-3a1b-43e7-9372-ff3fd25691d5", "4a35bf98-122e-4660-b913-7811a812da0b", "b3545d8a-1238-41bd-85c5-2b52f6a2638d", "37e1c58c-75b5-41e5-9d8f-25053e7a7836", "0ac5ead1-0b00-49f2-9863-50354bdcd934", "49603d20-a3ec-4981-b1e5-fcd5466fd31b", "4c02a0d0-9eb2-419d-820e-1258c99ae8fb", "61344edf-cb22-489d-8d2a-c4d71df6be50", "c7d58f76-0d5a-41f3-b0c7-dae162d95354", "c71e28b5-35bc-4174-99c1-b62a8933aab0", "eaef2d03-3d2e-4eb7-91e6-2a44756008fb", "b85294bb-faf3-4ed9-bd44-14ecf3da9d3c", "2c69b642-ea21-4cf8-8d2a-39d6a7bbc361", "b5b12580-b1f6-44a9-a84b-8c0cb919481a", "32777b9b-e11c-46be-a408-4d6931b3ec90", "b16982ba-8d3b-44ab-abf9-80e7bfa2336f", "1deec3be-1a85-413b-8f19-5855ace959f1", "c3b8f589-e6a9-4d30-9336-5dc750073020", "1c7917b6-836b-45e8-b13e-bb0b75865073", "95c4701b-a448-427c-8b5c-fa0ef38e5aa1", "5dc6a3c7-c6bf-4a08-ac84-36df7ffe83d1", "1acacba0-04b5-42dd-8988-0f8922a683e9", "920c5b5c-04fe-45b8-a89e-729379fbf001", "1096dc7c-2770-4004-8e7b-62263bad0667", "5b8c28f1-7f40-4db7-88f3-afee7af17880", "d9b99f43-2498-483f-b04e-bed888bd8796", "6c03beec-36cf-4c0b-b997-7e4b6bab09cf", "b22cfe5a-b9a3-4e71-a552-7fb268b8d4c5", "e7cb19a8-1fcd-4795-87f6-cb465efeb9d6", "2932113b-b43e-45f7-860c-fd6c431c4a07", "9a05bf37-1625-4711-be55-81a1380b787e", "85d2d350-ef7c-47f0-9b93-c6d8cca2594c", "c8ed8d72-8a04-41ed-9fa4-81c5e773db9c", "b94db02f-479f-4c37-87d4-87c7c50170f0", "c1b894a9-7003-4f7a-a787-2b661cb3da7b", "16ea94b8-d9f8-477f-a23f-d638b9d09b34", "83582d80-e9e8-4e4c-b926-96a556371f02", "4c0e1591-8a5b-459e-8bfe-b66a1fcf767c", "e5a8d498-6b22-4b49-985d-82c37585e9a3", "d682a81c-1f9a-4ca5-9ae5-9fcef30d40a7", "886e17c4-6041-4933-98c8-6fc49b3c9174", "d2ea5f50-ce5d-4a38-9780-da61dc90ee12", "60f46972-4e33-46e3-8ecb-4d20e13c1a05", "a28da878-830d-4c9f-aa01-e3c48a9339ae", "1575b6dd-14bf-4610-b839-01130495ea45", "74ecfdce-10e1-4ffd-abfb-121f3f1b0c74", "325d4916-0f12-45ff-b0f1-3a3941054eb7", "439f34f6-6d92-46e3-bdf6-715286b6bde2", "fe741f39-589a-41f3-bedf-f7c0f392e16a", "93f78c61-9740-413d-b768-9912cd3161ef", "88eba853-64cb-46ff-9745-fc38c9666b66", "805833df-bcea-4177-93d4-81a549b5b783", 
"452bc98c-b51c-48ec-b3ee-b380a6694e8c", "d3df5068-0141-4827-b583-b773ae1f5d9d", "f2720dc3-4669-4a19-8931-0cb9f4aedfc8", "edaefa7f-3e0f-4197-bd2f-2a21bc6c5825", "6b94705c-6231-414b-8837-306425a652e2", "db3e8b23-f911-483b-907c-e2034c2c6619", "444806d2-21e8-4dac-81da-2d64fb82874d", "4738d7db-d5a4-418b-87d8-d339f43aaa9d", "9492e792-71ba-48a9-ad92-d107fec88cd4", "2cae41a0-1340-4553-8c9c-2bea5804282e", "821d7380-66d7-4bda-bee8-5a015eb5274b", "64948313-8678-4ec7-9b72-e4efde89c47e", "3014e910-a107-44bd-96f1-01ca21a4162e", "d6446025-b127-41e4-8ca6-bdb54cb306e7", "b46503b1-bae2-491a-9ff0-68e8a87e97d1", "7e9d71a0-bdfd-4bc2-a449-a4e229fe2d2e", "38af156f-141a-42f4-b5a7-a06e5cc94324", "feef5e1f-17a7-4f08-bc6e-d44dacd74466", "6e440c97-48ed-4abe-a86e-cc2f85db3302", "a847bbfb-3bdd-4a42-9f70-93f7c3e01045", "c5599d78-d420-4c6c-b2cb-34664566ea98", "04e5553b-1e36-4398-a203-82cab3a97862", "ded6cc16-9f03-48dc-b90b-a5b5f3b03ebb", "499f9f47-a0fd-4700-8766-2142378586f3", "17b6d99e-9d9b-4f01-9dc2-1678807a7542", "84361864-b821-4556-a1a5-a31862178d2e", "f21722f3-752f-4e76-939b-1429ee6d8f0b", "5c77ea63-5345-445f-98ad-bc69576269e2", "969a5fde-1aa8-49c0-9d42-1278d227edf3", "30dd1149-5ef6-4b19-9a04-1765c33e6357", "4a5afb10-541b-4ba5-bc50-08d570df9b4a", "f2700c23-4a8b-4b21-a0e3-fb643fa832bd", "0406f467-70ca-4e52-a125-5b56713e3d32", "b6bfa95e-9911-483c-8cab-301b588a3d30", "f46ef4dc-a8ad-4996-b30a-a4d1437349a1", "5c78eade-4392-4784-adec-d0ca1263fab8", "5572979b-2863-44cd-870d-45d52c262bf5", "6b24f08f-6ccd-44e4-92cb-f1b3d01e92bc", "ca18ce33-4362-44d5-a406-6c27a0f7861a", "5e75e3d8-1d45-4315-8837-3293744b4f73", "30455866-ee22-48b1-a060-d2ba0f715c2d", "5314ffab-e934-4121-95ff-339857a4d727", "cd61513d-e844-47cb-8fca-0d6b4863cc35", "a3e31b32-a8d2-46f0-85c1-030e263106b4", "98209a41-983e-4d93-bdd7-7b6fe9ace351", "effe17c5-6c45-4886-9061-8deb8777eae7", "540a4284-abfb-469d-adf7-efdf2de6e063", "be0ca02e-4045-4bc9-8a40-86124675bb1b", "9ae8827d-c5d5-403f-ac27-989575a368d0", "17b70296-1704-4418-be14-d803ce2ecc58", "1ba67c51-e33b-42d7-99eb-d7de474219b2", "7ff32c28-a48e-4e1b-9b6d-608f353fef98", "8581e29b-16dc-4f42-bc24-47cb29731d88", "6a3d36bb-ce49-48e0-bc98-77b76c8ac681", "25055c3c-e8cf-42ae-a948-5953dc4b97ba", "2d80ce78-54b9-4216-9c4c-29a5c55a42ec", "55a0e6a4-1259-4dd6-aa9e-714e6dcff2c7", "09f02465-7111-4748-b6e8-73752d699022", "43570a47-e3b3-4d49-b7bc-8c9eb0132a0e", "d28de995-adf0-4861-9b60-eff1bd7dc124", "0404cb2f-970f-4d1c-85f7-74f1b5a9bb92", "ad4f2620-64e3-4f0d-bdb9-f9754e4a87d4", "a31e210e-d07f-4e17-96db-44873154d745", "09239c16-5b95-4566-8027-af1115b90ffb", "57dd61ec-fd44-49ad-8777-e972d4ff2ee0", "9d6d45c1-1b08-44cc-813d-3104c9d48101", "6c15d03f-d486-49eb-8f4a-eeb97cadb4c4", "080c4498-a345-4c55-acf3-3ffc2b1d5347", "76f1cdb4-f8dd-4964-957d-9f9b7f2e3ffa", "b30ae036-584a-41c0-ab11-09b566a27014", "8347e777-f555-484f-ba9b-687f777dce65", "b07a199d-3d88-4932-8360-c436e7624a7b", "18f2dcce-f902-43c9-8004-213e3ce18d2f", "20816ca8-bcbb-4b1a-be21-b5cf03d5375a", "97ab9268-d664-4cdb-bc6e-01a11f926629", "7c0be403-436c-4820-9984-136e9b5d21c7", "ce22a1e4-9c08-4287-94d3-2569b25c1850", "881f882a-c04e-4abd-b470-9608b6ce84f5", "815b7478-91e6-48ee-ad73-5bcac53aa1da", "da32abdb-18f3-48e9-9a4f-0532fd6cc95c", "ef0d3042-1780-48a4-a7e2-68ac91d40ef9", "9bf70dab-412b-4c9c-b211-86e70caab207", "653c70f6-f074-4140-b88e-3cf8998f75a2", "f226afe7-e029-4a9f-b1c9-454abe410d7b", "6d2c585b-1fbd-4787-942e-8eb3c9d8b979", "585507d7-11dd-4785-9536-1d503cc157d9", "fbc8a8ff-692e-48fc-afad-81ec2e7777e2", "337fcab1-fcb4-4a29-9db8-82b36a72ecfb", 
"262c1bee-ab30-4056-8a25-480f8304420e", "2e55fa52-a34c-4427-b497-a98c074589af", "fe71d38c-30df-4a7c-a675-873b589f5b1a", "ff83caf1-b91c-4092-a387-039161a5ddbb", "69a7e9fd-2701-4ce0-a07c-5449976d4818", "43b9e5bb-0376-42c9-a8a6-07c2e8a15b9c", "b34528f6-bfda-4d05-905e-4637888d1f3f", "666ae9cf-7c37-45c4-aa63-8ed6fcedde2a", "e5eb25d7-3e3f-4e7d-966c-2aff19f0d129", "bc26c442-0638-4ecb-8ede-0d9fad749592", "dfdb0fe1-039f-46db-86b9-0a10d5744edd", "a50c3e99-7093-45d1-86cd-a513f3206a01", "f7dd4111-a43b-4e70-93a1-45a75074060b", "b943a226-2bd6-4d14-a178-5e1cc7732450", "f28eb04f-c87b-4361-a74a-8b1d453840d2", "38bc428d-6a40-480a-af6e-c4a4491d85c9", "ced0748f-8840-4758-808f-9c7c25da7617", "6c619a43-f240-49a7-91fc-c5bc46fb2fa9", "c8798be2-44e1-4bc0-bc4c-027e842b425a", "b4f1628e-9280-4b3f-8ba5-96d2081b87dc", "644a0b92-3b13-4fdb-8fde-e0b6861b6c3f", "ec66bbb0-1388-48e6-b91e-430c75f57caa", "836469b3-50f3-431d-8d1c-a0f6e11e23f5", "1e78c7d0-86cf-40d5-90a8-3d34d076c8f6", "1c22b4b7-6bb3-4d7c-a1d9-2a38853a04df", "06b7671a-1846-4f01-87fc-b0a41e658fb4", "99a2f09a-5222-4606-88b0-cafd574b8889", "ba72a6f6-4f8c-4898-a36a-efb97d735117", "8b188f3a-afb7-4cf3-9edc-19eed509bc96", "bca09590-b1ba-4ec9-a2fa-a5c243d12804", "ee44a729-ae02-4e9e-8cdd-aa947c80dc58", "8cd4a27b-8e2d-4e78-8f6f-4ba7ca98ebeb", "0b67da66-6933-4e1d-9364-b7f11e18922a", "27578e8e-c44c-4da5-892d-93679b7939c7", "dfef5d01-f7f9-4978-8a6f-2b91e90e3fd7", "36134c7b-c6ec-425f-862c-8d08d2867463", "1d2d7877-32f2-40f6-b7aa-3ebab6f2c72f", "856fbdb6-50d9-43d1-907e-728e8e1c8bad", "c6f1ed54-5a96-4bc7-bdd1-c9f47ab02bc7", "e8c866d8-522c-4e53-9a29-5081916909e3", "5fcefd32-5a5b-4eca-bd48-b9d30470907a", "b0e678a5-7403-4096-9648-173728b26a64", "4adc7211-611e-453d-bf69-52cb7daa068c", "83435386-7aed-4c05-96e9-2661e2393c7f", "103ceb3c-99ab-4c6e-a42e-352c80efb7a3", "fc8accd4-c848-4489-b58c-6a07aad18472", "aa4e8f0a-adf6-444e-ad33-ff92beebdaa3", "a4ee8a04-86a5-4017-9525-56e63de561d5", "a69a73e9-37d5-4bdb-90aa-27e37626b018", "8aae115f-7f07-402f-a31a-6c7d5a2a82e5", "6f2836ce-f722-49a5-880a-2bd978f5824f", "7a39664b-9d72-4b21-b589-3145f8d4321e", "b1ee681e-2b56-47ce-ab40-dfb695afbd7c", "9905f06a-f993-48a2-ad7f-9756ab1ebcf2", "55488926-b0b1-4a6c-90bf-291b7e7dd687", "e88e1403-b9fd-4768-897f-fba304b8c821", "df2e7508-c250-4143-a6f0-b956977316b5", "f26eca50-4efd-42cd-be49-1b4055ad1b92", "eab23254-e90b-4b4c-b8e1-edcca3a1ca8a", "071c7c1a-0cea-411c-8400-38dac6ef5213", "15187e20-014d-4d6e-b797-a6d6824ec412", "d0f9b502-fc8d-4f2b-a99b-eb674e5affcf", "9138c195-ffd6-471c-9b0f-35c6f2bda660", "4a65a2da-9355-4c04-8c5f-73de32763679", "9dc5477f-4a3b-49d0-a325-d1d8faa6a08b", "fbc99444-b30e-47a3-ac23-14e533c66fa4", "bf0828b5-f174-4c8d-942d-f2db7aa26b4a", "f238aee1-b43d-458f-ae16-81150b5dd412", "b8236aa4-0fa4-4a03-8812-213d0e9dab58", "af402f64-e59f-4d61-b4d2-9f44e7591806", "9a29e474-b92b-4d2d-9eab-bf29e5dac261", "41a7601e-f84b-4cf6-a699-ba87c0f930f2", "90caea20-c716-4371-89e1-f4ac63612885", "ab6286ca-6f9f-4c76-82f5-7129afd57a7c", "1d7adba4-1b08-45eb-993e-19df387b769d", "d5c8ca45-9cae-4545-b92a-5a54ae6256d5", "01188e94-90f7-46cb-bd85-aef6c0902393", "feb82ae1-21c7-461a-9656-5530dab86794", "94ba5eab-52d1-4f7a-835d-0706fac6d63f", "6cde88d1-71f6-4571-9f86-d3336b290a1e", "3d49cc93-23e5-4276-b0e5-60a91fabaffa", "ac6f6fbc-9e6b-4b31-bd0c-e289765b4457", "e97c892e-f6f6-4047-a7ae-f89a861ffc31", "db6c83a3-317a-4e4e-a31a-35c10b8aa6f8", "e1676b5a-d733-447d-b786-492ee36fb73d", "eb932f31-ca9b-4aa3-9929-5fd821dd6bb8", "7fba77f6-783d-4784-b197-98359743af41", "24b373cc-49ce-47d6-a7e1-148bb7a8cf2d", 
"81b8fcaa-24b0-4e36-a70f-36321c55cc4b", "20cb007b-35b8-4e3d-9872-83a789531773", "4ec8e19e-fdff-485e-8be3-d33993fce252", "47fd4105-9146-4e1f-b55b-8ee60eb703eb", "5f2b5735-1ff7-4060-b09d-a23956e48381", "c07ae06d-5414-46c2-a759-d048cd1f91c9", "079d05e4-9356-420d-8533-711778f054a5", "d007f427-36c2-40a8-9b31-ed2ef439ac49", "52c10101-23a3-4cb0-86d6-6ddeed17061d", "30c91376-3c31-4fb6-9da7-bc245e3c2586", "fafda3f3-18f8-4366-b744-f92a4f01b495", "a6abbff5-2dca-4d09-a566-103a46b86814", "524b05d7-cffd-4276-951a-cdacdaf9cac2", "0eb440ad-0ad1-4b53-8d52-cace16ab2cf6", "2bc37f51-7861-44bf-95a5-cacd69bbf16a", "9ea98d20-19fe-43b2-bcdc-7a64359de16f", "25c6d623-7212-495f-9203-dcc218943be7", "5dbe6947-ec88-4513-8499-5c02866e0b1a", "bcadae36-028e-45c6-a88b-3832c3fc3524", "216f561c-5899-481d-bd8a-524430ade7d1", "6e3eff4f-3d13-4bf4-a6ef-fce5a831e7f3", "e45ad1e0-2af3-49d3-a545-2adf212a4013", "ca134844-683a-495d-a69b-ad9c66a4040c", "dab772df-fad7-4e74-a241-96b18ada7f01", "96bd5939-4069-4e77-ab9c-3be9bb5f5b62", "e6d9ab09-6c64-4e95-a6a2-9868f680646d", "379daacf-db91-4803-8797-738959d62c68", "020544a1-99cb-4974-aa81-6c77428d9a61", "2cbe8c40-fe58-4443-87fe-4153b1f157bf", "1714cc7c-31df-49c3-ab78-61ae75f057d6", "7d9c121a-4000-4387-960f-62caea151cda", "789229fb-a1cd-484c-a3b2-46341da8ac31", "3741d8c8-eda7-4c82-b775-dc9b223061d5", "ed070ac8-242c-436d-979f-e5dfb6a0a8c9", "04b13447-624c-4234-8f00-ddc6e36c6261", "8c368101-cf58-43d0-b0f5-405073a07265", "5b3eb67a-149f-437c-9947-f25de38686ad", "e55d245d-d07f-4a06-923b-df6c86366071", "6d50a217-465b-45ec-b02d-f640e3b65384", "f6312498-1b40-483c-94e8-69ab9cfe89e3", "e8aae986-aad5-4687-8947-c0651f775ade", "c192aa80-849b-451e-8c65-b717f84ec28c", "58be4bf7-c6b8-4d6b-a0f5-3e0551cd1c49", "eb14c19c-3e70-47b0-a6c1-af75e5370765", "2b596c03-cdbe-4d95-bb27-acb23a20f45f", "066934b3-376f-4c44-963d-582da2c082ee", "fc4eb05a-8cb8-4116-8635-524e9867c4cc", "ebfbbaaf-6c29-4d73-9dbd-38dc04cf6cda", "57e1fc5b-0ed8-4eff-ab60-7dd03efb5f08", "745fea69-17e1-4cf3-94b3-a2ca2cc93d52", "ed2262d0-d291-4519-81c0-fbbdd34decc8", "16149cf3-726a-481c-b7ae-7b8e3b9a06d3", "c4bdd6cc-2e56-482c-bd50-78786efc968d", "ce895fd5-493c-4fc9-8cb3-44afeb12d472", "cc8d696d-c1d6-4de3-b73d-d86d9caedd88", "bcbd7cde-e8ee-4d53-abb9-c1d605e01cc0", "98725ca6-45fd-40b2-a90b-3b0b031018bb", "262df308-d299-464d-9709-654347f75a42", "aa8a5198-b3e1-4f5c-8b73-378e209cc5ba", "f60b9a44-dd5e-4f45-841f-a61ed2ea1403", "bd3504db-ef98-4219-8390-ad39c8c56d9d", "75dba363-5554-4930-aef5-b2e7e72fc1ec", "4e2999a5-e19b-459a-829d-4feff5e69605", "46da3bf6-3a19-4591-b4e7-e58c2df3d70a", "be2059b1-c993-4075-ba87-0a9a4876b6a1", "e5aee2ea-a677-438a-92db-4f9006ffa7ad", "643344ab-ed1a-4ddf-a835-06cf20584250", "a123fbb6-9e49-4f80-8ce3-8531619a2e99", "79117bf7-9333-4a0b-8199-07ee218c7e87", "b5e7937a-96c8-41db-b598-53e6ff7a6646", "331f3848-841d-4b98-afc4-6a3da91974a6", "a89816fc-81e8-4d35-bb2f-a2c436d05c16", "ae5c0bec-4778-429c-a77f-e3f6bce7963c", "b8c2029a-08a8-4292-bb9e-381318b954b1", "cbe33f32-cb4c-430a-8e25-2833f8866afe", "fe114e0c-bcc9-490a-ab09-add26ddb6382", "dc7db30d-7fd8-4df3-866e-da87e169ba26", "b34b9390-01c5-4c2d-8183-d8ebd67ece64", "2fa7d627-bb0d-4614-9edd-00136a8223cd", "772f82ad-0231-4d09-b381-dc5d72d95e00", "a32bb209-d3ab-4683-ac5a-4a50097bd8fb", "d2dfc24e-c6f6-4b85-a15d-244cf347e221", "79ad8d9f-5a09-45a6-96df-6f80d24405bb", "5677f329-585c-4409-bcaa-e3ddf3f21d72", "8631d5a8-8c11-4dd5-b7c2-e3610a5e2c1d", "f9efa154-016b-4c2a-b511-5a84ec784152", "d41b5648-bf48-4c47-9696-1cbaaa778a5c", "d54b9520-11fb-4782-93f7-3fc906b24ff1", 
"7aafcfc8-3cb8-47ee-ad29-1a1568015e8f", "6dc89e6e-7f79-4749-8ae9-16dfbc06ad5c", "63ff8cd4-5ad4-4cdd-955a-fb227c0a83a6", "c9566a54-0a82-4e1c-bdd0-17619158433e", "d7eaba30-e1fe-4de3-808d-26f7d1d2f3d6", "dd6a23d6-f218-4c4e-ab2f-45a34db82c7e", "1355a4e1-6c67-44b4-9b38-6655fd46ee2d", "cb36c6b2-6823-4b87-b4d6-4d3f536b354e", "2bf501ac-e516-4c53-85fc-fa976cc093c4", "77d837d8-7954-41d2-adb5-48465d6ef4cb", "63715421-8ce4-4bfe-b926-4ee480c97509", "4027d04e-7150-45ef-bc0f-ca04bc17db98", "194396d4-069d-4aa5-b5b9-568268f53966", "b9a7dba5-c48f-40d1-986b-fb8ea4eeea9f", "b943049b-79bf-44ea-910c-84c6830e1858", "14a323dc-0a44-44be-8bc9-98a41d5b6a7f", "f65e451d-bfbb-4d35-9a0b-5b9cccba1f0d", "73f090ec-beef-4045-a6a2-9d91019279ab", "60a03d25-6aff-4e41-bc3c-61ecacf1287d", "85814af2-e9f6-468f-a04b-5c9383455f9b", "e532e794-2d4e-4e9d-a45a-5283435cbe30", "bce3834a-780e-4202-824d-144f2398c763", "733521fb-4cdb-4355-8db3-077f451b6a1e", "3792bbe8-915e-4b73-b991-62852b05a285", "c07f7f64-1137-4da6-b539-a0fba5a7e33b", "375a0faf-c84b-41f9-aeed-5e7aaec40b75", "ce787730-d0c5-48b2-ad6a-569a719433be", "de251c54-1177-42fc-bd1d-7a80c0d74fcd", "35d63551-ba33-4ebb-aec0-49f214ae7e66", "42da53ec-3b5d-4bf6-98d0-ef789de5cb84", "47dbb703-fd1a-41ad-872a-56cdcc4be76e", "241a2d14-fa78-4dea-86e6-1bb8df72785e", "72caf107-b306-44d8-9ae7-5e1e1bc21ba5", "1351b7b7-a4e7-4d81-a70c-cf97e97973d3", "bf18eac3-1a40-4459-bad0-ddd3e216a85a", "384d8554-a949-4192-aed1-83d9b0ed4b2a", "778a92e3-c67a-4c2f-899a-284823717b0a", "9dfa46b1-ac55-4438-b46c-f0e6e4f3f8f1", "2f1d1648-bc76-4143-a663-c5db3f03a01f", "37931bc3-97c6-4a26-bc5c-300b56bc7ba8", "29e6dd38-d85e-407d-ab21-1bf999ba448c", "a65c47d7-65b1-4b2b-b47d-6b0d8b119e84", "5e100849-6a24-4de3-b941-2efb8a7e1dcd", "b7cdf619-ac55-439f-a1f5-4d8f5d82f99a", "b5f742d0-3a6f-4fb3-98be-c8b77f829063", "9a7481be-eba8-48ad-9c86-ed4e3bdd1e64", "4d58f025-3af0-4221-a8c5-43da05af3b58", "fca08b82-c7d6-4d24-98f0-8411111f33b1", "fbd4ea09-d525-434a-be61-7773af642d22", "fadcd762-e251-4528-b598-7c4d8983ed21", "71960f4d-b5dc-4073-9207-da469975e416", "f35544ea-5ea0-4105-b9f7-2ec2dad46bdb", "c5a7c9e5-69ae-48dd-978f-e82b6defd827", "34fcb400-073d-4b70-9fde-5fab73e9365e", "5a7c92e8-71cc-4887-9e9c-49d1a639b382", "0b127134-e4d4-4c6c-a36f-85e8f0393045", "a9134fcd-a411-4f98-9779-eee1e98b8b5b", "22e15aeb-1c90-4bec-aba7-21d322c73146", "77e32929-73fd-43fa-a6c4-d41df96f366c", "1fcc4b26-1815-499e-b8b9-2c7b5d53871d", "04467916-4416-4ccf-b050-6170335a3157", "dcb0925a-7174-46be-b129-6ec42e64675b", "832b9eee-2266-4b1d-8b6b-64c35a4a1b90", "92e31890-eadb-4919-a7db-87bed8926fb3", "ae96b1b5-7ba0-4ab0-9731-24197ba56570", "9ca5eb17-b95d-40b7-8f5b-47ee10db6afa", "02b11afa-ce2e-4aa2-9253-c1fe35302bd0", "409097e6-8d73-4a44-b2a6-56078b988ac6", "119e3691-4bb7-4980-b1df-bf4a55324d6c", "bb24e12a-242f-46e0-af3b-8fdf8d5758fe", "936b39c1-b714-4718-961b-53dc7797d09d", "17b9cc56-82a1-4841-83ee-5dce2aa40635", "31b01bf0-3307-492c-afe0-7229c3d85fa3", "8b2f129e-4481-44b9-adf7-fb2720e09ced", "24859b29-a3ea-42c7-9577-f3e5f708c844", "36570aa1-8771-4b18-b9d2-0c7d42b87ccb", "e4914e87-fae5-421e-ad75-3c21a73a6665", "abe8e5cb-f99a-4c55-97a2-3f4a6080beb8", "25b4d4bd-c952-4e40-9eed-af7eadd747d9", "0aecc149-6458-4984-89c2-daab8a9a2d18", "ba5811fb-4ad0-46ec-bed7-9aa0491a71b8", "f131a84a-7b7f-4220-87a6-b71617cacfb5", "c50d2dbc-59bf-4da4-9705-07120474cd09", "21b11587-dd3a-4cc2-a120-5d92c953e2d8", "0008d989-47ce-41a5-8ea1-d07bd161ac55", "7d6fec03-c63b-4db5-874e-7f2b0e53824b", "7b38d1db-a8dd-49d2-9e4a-5410e0462a60", "ece62c27-00c5-4907-8204-44f42aa4f83c", 
"4191c316-bd5f-47aa-a4da-28ec81d7de88", "7eb0f7f7-7af8-45b8-b316-74589a8376a1", "2e2b91d9-0b45-4dbc-a9b0-b507a2acec02", "cb37433a-bb55-47c9-8696-889f36d9a188", "9397ae15-7e37-4d6b-8e7e-2c32ee4a1805", "a35de6b2-34ee-4b8e-a838-d2bffb62bf27", "a363da84-0efa-4250-8956-07f98d870449", "9cf97173-98a8-417e-a319-eae2ab22fce8", "8a6dd772-4525-4a20-87d2-c9e7ffd920d0", "78a7aef0-2ea7-4f55-83f0-dd1319f282d5", "4a0d157c-3379-4047-96e9-4a52fb403ac1", "13dded4f-225a-4a43-a4ea-b552be510614", "8bcf0b40-86c8-43f8-8165-8350447e3692", "59331595-90e9-48ea-93c5-4dae43721d24", "6b96aa91-3327-4d94-a271-155962b1cc7f", "d6c90984-f636-4d04-a29a-c6324e0bf2ae", "38cdb584-5486-4857-b69d-0bd473e6bbee", "90b08268-8e7f-4f3f-bbba-37a3309c088d", "f357aec5-6b19-4d4b-a33c-7af27d7a9c0e", "5ccce43f-a9bf-45e0-9e87-1f22e8c9b2ef", "f97c811a-03ec-418b-8eb3-e24161f195f2", "57f0902e-649d-43b6-a41b-d6031f2a572e", "c5589445-716f-48e7-b5f4-e50f81a8b5e8", "9edfebce-cda2-44e7-bf93-22f1471af47f", "97aa53d8-3958-4d51-9b01-3aec6eb1b4a1", "438f8dc5-53df-49e4-9ee8-b1ef9b6a129f", "e1252243-36f1-48a2-82b5-2f450c0eca67", "65c2052a-2aca-462e-a47c-79262bbc21c6", "a540e60e-c64b-4a8d-a490-41870f386ce6", "a6a97211-221b-49c5-88b6-2c01f313ca02", "808d1512-1f22-4294-954d-15dce378b092", "aba2add0-9fc9-4cca-914e-7a03af84e852", "3ed751a7-875c-4061-a51e-39ed7a1a63b4", "c902ecc8-41f1-4a31-ad05-0de98e694a77", "066a6326-763f-4494-a458-53350b36806f", "934bf8f0-f438-4364-8583-f6f3bd4bc27c", "0470d06d-df1d-4c0f-865b-64b7c566502b", "8f59cd69-4b89-4d65-a5f9-c158df2b2562", "e9c5f531-af7b-4a41-b0a8-f13cf8ed4034", "38896ebb-e8a5-48c5-95a8-ec0c73e4e43e", "98785b88-b462-4f3d-9689-13c8524b4738", "bb8c9d31-dd2e-4f50-8295-467ed70ae70a", "7e95e5e0-aed3-4fc4-909d-b17407e921b8", "e63b6c3c-b796-45c5-a3cc-c297ee6ebe19", "e0975843-1213-4bb8-b0f7-89433f6505f8", "bea604cd-e1e7-4cfd-a819-e8b4c124fa80", "9165083b-e56a-434c-9346-206b247c42ff", "db0abb44-02b1-4900-9cea-30cb8a34eb30", "6ad3bf0f-bcab-4da1-af98-0656886f010f", "50c532d3-384e-4328-9afc-5dcd8cf84de1", "76785645-0ba2-4bdf-8141-ecfa99b51dc3", "55be0b16-6a7a-424d-a068-f34d04f82894", "e2afaa55-5537-4763-a075-920b698f586a", "c3663e02-759a-4398-a62f-06e0bb4b213b", "4b39663f-c96f-4aec-9e40-a291061d8137", "e0a97f98-df2a-43a3-a15a-243ec97e6d21", "42563b7f-cbcd-4519-82d7-ca67adad7b27", "2bcf57ea-ff46-42d1-ad9d-99deacf38369", "10e9d4ea-46ce-484d-949d-0b8ba30a7c43", "fad2e5e6-9a2c-4936-96aa-85e293c2ded1", "81918138-c432-473d-b792-3f072be98160", "2596d581-ed4a-400e-8142-70ce63a64ae7", "42f0b20d-734a-4f63-866b-b6ecd606d953", "460a4973-530a-436e-9f64-ac43ac154551", "5f534f88-e5ad-49e1-bd89-b799513f4c00", "1e602cba-2b97-4c95-adf8-23e950e42dd9", "9c397935-cbdb-4af5-ae96-623262270cf3", "25dfca0d-0b15-426d-9106-1fa2a9fa6e1a", "3a1f4ce3-1c43-4e97-8d11-fa85315fd754", "54ffc58a-716e-4ca2-a1fe-8c09ae37222b", "868606b4-3614-4597-be8f-c25b3da8b6f0", "4bda94d3-f684-49ae-8fa7-10ffb5ecfe8a", "84475cf5-b042-4415-9527-eb8121b9ea99", "c6621a32-a894-4955-9a4d-755481e70816", "85784adb-42aa-4c09-8217-5f1fc3a0923b", "93dbb6bf-40a7-4ffc-9e39-62061ae9274c", "42c5c024-e12f-4a21-9ce0-919a80fe387a", "24f57b00-4dfa-437a-8b73-2bfc6ae90afa", "05271871-334c-43b9-99ee-23accab4be00", "845821ef-55a0-447a-983c-e931e9eadc39", "17f505de-eba1-4fc0-9d20-a5db5f1f3437", "f0af8857-c8d0-4783-9e30-b3dc17316850", "8fc3e581-2e27-4273-9f62-b610581d35c8", "bfd51746-8f4f-4e5a-9cb9-1891f4612a37", "63a946f5-614e-4f0b-a3d9-4d7eba44d239", "fb0eaa3c-59a6-4896-b0f1-b139b803fc82", "8fd2084a-81e6-4942-a108-fd4194781dd3", "79e1410d-f31a-4a94-ad26-af8fe015f2f0", 
"b50a540f-c634-480e-aa83-fc30b1fd5f25", "4d8265b9-81d0-456b-a1f0-53e01195baea", "86d9973f-21b0-4ebf-b925-02dad85aa679", "1440f2be-e94f-4f2c-a37b-67fd11eb46de", "8090ba14-7f38-4daf-b862-09e90de0e046", "c0d4cb8b-7723-4251-8957-2d8d906cbc46", "6d802a6d-3f8e-4591-8fda-adc517463116", "6b9e9e20-907c-4eea-a6f7-db8536fafdb7", "892c0e72-d7d4-43a7-b673-7ca4b14dbe2a", "a4b48e49-421d-4f99-a9bb-a6b1513b5a86", "6bd47f03-3543-4d87-9eca-4c146c8476d5", "0c6d92c2-9c60-4670-805c-5ab17c5de6b7", "ec17855c-fbc3-4722-bdb6-df7f56be2c99", "054268bf-050f-4f8f-8d16-d655fa7ebae5", "9d4996d3-8df9-413c-b554-a4c3b8670281", "64e63e70-92ef-4fd8-93c0-1a3442e48122", "8bbae6c9-cec0-4940-96d2-de9dfeea7913", "c574c077-3358-4a46-afeb-89a7034053a8", "9a3ff54e-1669-4043-8d76-a7171553b662", "2d101f06-5fdb-412d-bb11-284b477b526d", "f5eede01-7991-4960-b26e-abb4b6112057", "d2128023-5b6f-4293-87d9-f9b6ce300d82", "6c01809d-808d-4575-b9e4-5df076d1b87f", "77b8d862-2e2b-416f-a1ef-61c02e3981ba", "55bb919e-075f-47f2-930b-44b9f987b2e6", "5c7717d8-3424-44f0-8f14-a1b33d61052f", "da6b6896-c646-42c6-ac16-224663617bde", "30afc2d8-2758-449e-88d5-79a729e8b528", "fbd270ed-190a-487e-9aa5-e16107ea5572", "66f42a79-e531-48d2-839a-be0e2e64c06d", "a12b77d7-1761-4301-a1e0-6e8426f7426f", "d49eb40a-2874-42a9-b371-c80448c30d6e", "9d9f5dee-f7cc-411d-99cc-c8342fd0e285", "830126df-bbee-4e7f-931d-23d2e7f81a64", "66e1ea73-e973-43e7-8cc6-b1a147c72a15", "66d3c1cc-acaf-4bbc-b4f2-c7817a03d7da", "3111c0d1-1abb-4b24-b802-42bec515d202", "b29643f4-3f54-4505-aa74-e439bfb13dff", "50bffb3f-4038-4968-8849-57f4fb393859", "5c8d75b1-16ec-43cc-bf6a-66f7ff571d77", "0f14e350-6445-4b5f-a34f-55f1535b0a61", "9eeb9fe8-f453-4370-b6f3-34bf5ae9ddee", "15efe044-f628-44a2-9ebc-edd428b40cd3", "9486a348-5e69-4f3b-bdd6-9d03f4e3c688", "0ca63949-82e1-4217-bd3f-6e5bb73e27fd", "e2c38443-be9a-4f29-8991-640546120884", "427a174b-7c18-44cd-8b6a-8a994bc62455", "e241febe-6412-48c5-8a16-5e44d034c987", "436ad187-cf6a-4df3-8999-fe7889f88800", "d83bf9c5-91f3-4873-bd79-797f72f16f13", "0cf124c4-29d6-4f97-8a63-012168912766", "36d0f6d8-2f4f-4809-ab3a-bdab1fdb215c", "aa12d7ec-b1ef-4b86-83e2-74e52e6daddf", "d385fdd2-c297-4c6c-ad3e-72160865aca6", "2ea28564-d0f1-4c23-a5c8-59e6384fdbac", "7620fdfe-7417-4199-94f6-2cda47a57c32", "9f92600d-e49e-488b-89da-ae0f1e80d943"], "metadata": {"window": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. Devis Tuia (EPFL)\nProf. David Rolnick (MILA)\nSeptember 1, 2023\n\nContents\n1 Introduction 2\n2 Object Detection 6\n2.1 The framework . . . ", "original_text": "\u00c9COLE POLYTECHNIQUE F\u00c9D\u00c9RALE DE LAUSANNE\nENVIRONMENTAL COMPUTATIONAL SCIENCE AND EARTH OBSERVATION LABORATORY\nAutomated monitoring of insects\nMASTER THESIS\nL\u00e9onard PASI\nQUEBEC ARTIFICIAL INTELLIGENCE INSTITUTE\nSupervisors:\nProf. "}}}} \ No newline at end of file