Confirmed that content IDs that don't match the media manifest occur in both the TreeOfLife-10M and Rare Species datasets.
Browse files
- notebooks/ToL_license_check.ipynb +149 -0
- notebooks/ToL_license_check.py +37 -0
notebooks/ToL_license_check.ipynb
CHANGED
|
@@ -913,6 +913,41 @@
|
|
| 913 |
"eol_df_media.head()"
|
| 914 |
]
|
| 915 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 916 |
{
|
| 917 |
"cell_type": "markdown",
|
| 918 |
"metadata": {},
|
|
@@ -1153,6 +1188,120 @@
|
|
| 1153 |
"No, we do not have any of the missing ones in the older media manifest."
|
| 1154 |
]
|
| 1155 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1156 |
{
|
| 1157 |
"cell_type": "markdown",
|
| 1158 |
"metadata": {},
|
|
|
|
| 913 |
"eol_df_media.head()"
|
| 914 |
]
|
| 915 |
},
|
| 916 |
+
{
|
| 917 |
+
"cell_type": "code",
|
| 918 |
+
"execution_count": 34,
|
| 919 |
+
"metadata": {},
|
| 920 |
+
"outputs": [
|
| 921 |
+
{
|
| 922 |
+
"data": {
|
| 923 |
+
"text/plain": [
|
| 924 |
+
"treeoflife_id 113471\n",
|
| 925 |
+
"eol_content_id 113471\n",
|
| 926 |
+
"eol_page_id 9762\n",
|
| 927 |
+
"EOL page ID 0\n",
|
| 928 |
+
"Medium Source URL 0\n",
|
| 929 |
+
"EOL Full-Size Copy URL 0\n",
|
| 930 |
+
"License Name 0\n",
|
| 931 |
+
"Copyright Owner 0\n",
|
| 932 |
+
"dtype: int64"
|
| 933 |
+
]
|
| 934 |
+
},
|
| 935 |
+
"execution_count": 34,
|
| 936 |
+
"metadata": {},
|
| 937 |
+
"output_type": "execute_result"
|
| 938 |
+
}
|
| 939 |
+
],
|
| 940 |
+
"source": [
|
| 941 |
+
"eol_df_media.loc[eol_df_media[\"Medium Source URL\"].isna()].nunique()"
|
| 942 |
+
]
|
| 943 |
+
},
|
| 944 |
+
{
|
| 945 |
+
"cell_type": "markdown",
|
| 946 |
+
"metadata": {},
|
| 947 |
+
"source": [
|
| 948 |
+
"The missing info is distributed across 9,762 pages."
|
| 949 |
+
]
|
| 950 |
+
},
|
| 951 |
{
|
| 952 |
"cell_type": "markdown",
|
| 953 |
"metadata": {},
|
|
|
|
| 1188 |
"No, we do not have any of the missing ones in the older media manifest."
|
| 1189 |
]
|
| 1190 |
},
|
| 1191 |
+
{
|
| 1192 |
+
"cell_type": "markdown",
|
| 1193 |
+
"metadata": {},
|
| 1194 |
+
"source": [
|
| 1195 |
+
"### Check how this compares to Catalog \n",
|
| 1196 |
+
"Let's see if these are all images in TreeOfLife-10M, or a mix between it and Rare Species."
|
| 1197 |
+
]
|
| 1198 |
+
},
|
| 1199 |
+
{
|
| 1200 |
+
"cell_type": "code",
|
| 1201 |
+
"execution_count": 29,
|
| 1202 |
+
"metadata": {},
|
| 1203 |
+
"outputs": [],
|
| 1204 |
+
"source": [
|
| 1205 |
+
"cat_df = pd.read_csv(\"../data/catalog.csv\", low_memory = False)\n",
|
| 1206 |
+
"# Remove duplicates in train_small\n",
|
| 1207 |
+
"cat_df = cat_df.loc[cat_df.split != 'train_small']"
|
| 1208 |
+
]
|
| 1209 |
+
},
|
| 1210 |
+
{
|
| 1211 |
+
"cell_type": "code",
|
| 1212 |
+
"execution_count": 30,
|
| 1213 |
+
"metadata": {},
|
| 1214 |
+
"outputs": [],
|
| 1215 |
+
"source": [
|
| 1216 |
+
"# Add data_source column for easier slicing\n",
|
| 1217 |
+
"cat_df.loc[cat_df['inat21_filename'].notna(), 'data_source'] = 'iNat21'\n",
|
| 1218 |
+
"cat_df.loc[cat_df['bioscan_filename'].notna(), 'data_source'] = 'BIOSCAN'\n",
|
| 1219 |
+
"cat_df.loc[cat_df['eol_content_id'].notna(), 'data_source'] = 'EOL'"
|
| 1220 |
+
]
|
| 1221 |
+
},
|
| 1222 |
+
{
|
| 1223 |
+
"cell_type": "code",
|
| 1224 |
+
"execution_count": 31,
|
| 1225 |
+
"metadata": {},
|
| 1226 |
+
"outputs": [],
|
| 1227 |
+
"source": [
|
| 1228 |
+
"eol_cat_df = cat_df.loc[cat_df.data_source == \"EOL\"]"
|
| 1229 |
+
]
|
| 1230 |
+
},
|
| 1231 |
+
{
|
| 1232 |
+
"cell_type": "code",
|
| 1233 |
+
"execution_count": 32,
|
| 1234 |
+
"metadata": {},
|
| 1235 |
+
"outputs": [
|
| 1236 |
+
{
|
| 1237 |
+
"name": "stdout",
|
| 1238 |
+
"output_type": "stream",
|
| 1239 |
+
"text": [
|
| 1240 |
+
"<class 'pandas.core.frame.DataFrame'>\n",
|
| 1241 |
+
"RangeIndex: 6250420 entries, 0 to 6250419\n",
|
| 1242 |
+
"Data columns (total 8 columns):\n",
|
| 1243 |
+
" # Column Non-Null Count Dtype \n",
|
| 1244 |
+
"--- ------ -------------- ----- \n",
|
| 1245 |
+
" 0 treeoflife_id 6250420 non-null object \n",
|
| 1246 |
+
" 1 eol_content_id 6250420 non-null float64\n",
|
| 1247 |
+
" 2 eol_page_id 6250420 non-null float64\n",
|
| 1248 |
+
" 3 EOL page ID 6137845 non-null float64\n",
|
| 1249 |
+
" 4 Medium Source URL 6137845 non-null object \n",
|
| 1250 |
+
" 5 EOL Full-Size Copy URL 6137845 non-null object \n",
|
| 1251 |
+
" 6 License Name 6137845 non-null object \n",
|
| 1252 |
+
" 7 Copyright Owner 5527063 non-null object \n",
|
| 1253 |
+
"dtypes: float64(3), object(5)\n",
|
| 1254 |
+
"memory usage: 381.5+ MB\n"
|
| 1255 |
+
]
|
| 1256 |
+
}
|
| 1257 |
+
],
|
| 1258 |
+
"source": [
|
| 1259 |
+
"eol_cat_df_media = pd.merge(eol_cat_df[eol_license_cols], media, how = \"left\", on = \"eol_content_id\")\n",
|
| 1260 |
+
"eol_cat_df_media.info(show_counts = True)"
|
| 1261 |
+
]
|
| 1262 |
+
},
|
| 1263 |
+
{
|
| 1264 |
+
"cell_type": "markdown",
|
| 1265 |
+
"metadata": {},
|
| 1266 |
+
"source": [
|
| 1267 |
+
"Looks like the problem is distributed across both datasets."
|
| 1268 |
+
]
|
| 1269 |
+
},
|
| 1270 |
+
{
|
| 1271 |
+
"cell_type": "code",
|
| 1272 |
+
"execution_count": 33,
|
| 1273 |
+
"metadata": {},
|
| 1274 |
+
"outputs": [
|
| 1275 |
+
{
|
| 1276 |
+
"data": {
|
| 1277 |
+
"text/plain": [
|
| 1278 |
+
"treeoflife_id 112575\n",
|
| 1279 |
+
"eol_content_id 112575\n",
|
| 1280 |
+
"eol_page_id 9634\n",
|
| 1281 |
+
"EOL page ID 0\n",
|
| 1282 |
+
"Medium Source URL 0\n",
|
| 1283 |
+
"EOL Full-Size Copy URL 0\n",
|
| 1284 |
+
"License Name 0\n",
|
| 1285 |
+
"Copyright Owner 0\n",
|
| 1286 |
+
"dtype: int64"
|
| 1287 |
+
]
|
| 1288 |
+
},
|
| 1289 |
+
"execution_count": 33,
|
| 1290 |
+
"metadata": {},
|
| 1291 |
+
"output_type": "execute_result"
|
| 1292 |
+
}
|
| 1293 |
+
],
|
| 1294 |
+
"source": [
|
| 1295 |
+
"eol_cat_df_media.loc[eol_cat_df_media[\"Medium Source URL\"].isna()].nunique()"
|
| 1296 |
+
]
|
| 1297 |
+
},
|
| 1298 |
+
{
|
| 1299 |
+
"cell_type": "markdown",
|
| 1300 |
+
"metadata": {},
|
| 1301 |
+
"source": [
|
| 1302 |
+
"For `catalog` the missing information is distributed across 9,634 pages, so that's 128 pages (of 400) in the Rare Species dataset that we can't currently match."
|
| 1303 |
+
]
|
| 1304 |
+
},
|
| 1305 |
{
|
| 1306 |
"cell_type": "markdown",
|
| 1307 |
"metadata": {},
|
notebooks/ToL_license_check.py
CHANGED
|
@@ -102,6 +102,12 @@ eol_df_media.info(show_counts = True)
|
|
| 102 |
# %%
|
| 103 |
eol_df_media.head()
|
| 104 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
# %% [markdown]
|
| 106 |
# ### Save Record of Missing Content IDs & Compare to Older Media Manifest
|
| 107 |
# Let's save a record of the missing content IDs, then we'll compare them to the [July 6 media manifest](https://huggingface.co/datasets/imageomics/eol/blob/eaa00a48fa188f12906c5b8074d60aa8e67eb135/data/interim/media_manifest.csv) to see if any are in there. The July 6 media manifest is smaller, but we'll still check.
|
|
@@ -129,6 +135,37 @@ eol_df_media_old.info(show_counts = True)
|
|
| 129 |
# %% [markdown]
|
| 130 |
# No, we do not have any of the missing ones in the older media manifest.
|
| 131 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 132 |
# %% [markdown]
|
| 133 |
# ## Check EOL License file(s)
|
| 134 |
#
|
|
|
|
| 102 |
# %%
|
| 103 |
eol_df_media.head()
|
| 104 |
|
| 105 |
+
# %%
|
| 106 |
+
eol_df_media.loc[eol_df_media["Medium Source URL"].isna()].nunique()
|
| 107 |
+
|
| 108 |
+
# %% [markdown]
|
| 109 |
+
# The missing info is distributed across 9,762 pages.
|
| 110 |
+
|
| 111 |
# %% [markdown]
|
| 112 |
# ### Save Record of Missing Content IDs & Compare to Older Media Manifest
|
| 113 |
# Let's save a record of the missing content IDs, then we'll compare them to the [July 6 media manifest](https://huggingface.co/datasets/imageomics/eol/blob/eaa00a48fa188f12906c5b8074d60aa8e67eb135/data/interim/media_manifest.csv) to see if any are in there. The July 6 media manifest is smaller, but we'll still check.
|
|
|
|
| 135 |
# %% [markdown]
|
| 136 |
# No, we do not have any of the missing ones in the older media manifest.
|
| 137 |
|
| 138 |
+
# %% [markdown]
|
| 139 |
+
# ### Check how this compares to Catalog
|
| 140 |
+
# Let's see if these are all images in TreeOfLife-10M, or a mix between it and Rare Species.
|
| 141 |
+
|
| 142 |
+
# %%
|
| 143 |
+
cat_df = pd.read_csv("../data/catalog.csv", low_memory = False)
|
| 144 |
+
# Remove duplicates in train_small
|
| 145 |
+
cat_df = cat_df.loc[cat_df.split != 'train_small']
|
| 146 |
+
|
| 147 |
+
# %%
|
| 148 |
+
# Add data_source column for easier slicing
|
| 149 |
+
cat_df.loc[cat_df['inat21_filename'].notna(), 'data_source'] = 'iNat21'
|
| 150 |
+
cat_df.loc[cat_df['bioscan_filename'].notna(), 'data_source'] = 'BIOSCAN'
|
| 151 |
+
cat_df.loc[cat_df['eol_content_id'].notna(), 'data_source'] = 'EOL'
|
| 152 |
+
|
| 153 |
+
# %%
|
| 154 |
+
eol_cat_df = cat_df.loc[cat_df.data_source == "EOL"]
|
| 155 |
+
|
| 156 |
+
# %%
|
| 157 |
+
eol_cat_df_media = pd.merge(eol_cat_df[eol_license_cols], media, how = "left", on = "eol_content_id")
|
| 158 |
+
eol_cat_df_media.info(show_counts = True)
|
| 159 |
+
|
| 160 |
+
# %% [markdown]
|
| 161 |
+
# Looks like the problem is distributed across both datasets.
|
| 162 |
+
|
| 163 |
+
# %%
|
| 164 |
+
eol_cat_df_media.loc[eol_cat_df_media["Medium Source URL"].isna()].nunique()
|
| 165 |
+
|
| 166 |
+
# %% [markdown]
|
| 167 |
+
# For `catalog` the missing information is distributed across 9,634 pages, so that's 128 pages (of 400) in the Rare Species dataset that we can't currently match.
|
| 168 |
+
|
| 169 |
# %% [markdown]
|
| 170 |
# ## Check EOL License file(s)
|
| 171 |
#
|