All of these programs were made for a side project I've been working on: a fan game based on the Pokemon franchise. If you're unfamiliar, the franchise comes with a ton of information about each Pokemon, its moves, and so on. These programs were made to gather and organize that data.
moves_getter.py: Scrapes the needed tables off of the Pokemon Database website and outputs the data into sorted csv files. It uses a csv file of names to iterate through all the needed Pokemon and prints out any names that hit errors.
import io
import re
import requests
import pandas as pd
from bs4 import BeautifulSoup
import time, random
namesFile = "names copy.csv"
URL_FORMAT = "https://pokemondb.net/pokedex/{value}/moves/6"
FILE_FORMAT = "D:/Coding Projects/PokemonCSVSort/pokemon_moves_problem/files/mark2/{mon}_{type}.csv"
df_names = pd.read_csv(namesFile)
notFoundList = []
names = df_names["Pokemon"].astype(str).tolist()
pause = random.uniform(1, 2)  # random delay between requests; randrange(1, 2) can only ever return 1
#Iterates through csv of pokemon names
for name in names:
url = URL_FORMAT.format(value=name)
    try:
        r = requests.get(url, headers={"User-Agent": "table-scraper/1.0"}, timeout=10)
        r.raise_for_status()
soup = BeautifulSoup(r.text, "lxml")
tables = soup.find_all("table")
tutorChecker = False
try:
#Iterates through tables on page, stopping after table 6
for idx, table in enumerate(tables[:6], start=1):
#Try to get a caption
caption_tag = table.find("caption")
if caption_tag and caption_tag.get_text(strip=True):
raw_name = caption_tag.get_text(strip=True)
else:
# fallback to id or class, or just index
raw_name = table.get("id") or "table" + str(idx)
safe_name = re.sub(r"[^\w\-]+", "_", raw_name).strip("_")
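                # Note: safe_name isn't used below; output file names come from FILE_FORMAT instead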
#Depending on which table it is, save data to csv file
df = pd.read_html(io.StringIO(str(table)))[0]
cols = list(df.columns)
if cols[0] == "Lv.":
file_name = FILE_FORMAT.format(mon=name, type="moves_by_level")
                    subset = df.iloc[:, :2]
subset.to_csv(file_name, index=False)
elif cols[0] == "Move":
                    if tutorChecker: #Table for TutorMoves has the same header, so skip any later "Move" table
continue
else:
file_name = FILE_FORMAT.format(mon=name, type="moves_by_egg")
                        subset = df.iloc[:, :1]
subset.to_csv(file_name, index=False)
tutorChecker = True
elif cols[0] == "HM":
file_name = FILE_FORMAT.format(mon=name, type="moves_by_hm")
                    subset = df.iloc[:, :2]
subset.to_csv(file_name, index=False)
elif cols[0] == "TM":
file_name = FILE_FORMAT.format(mon=name, type="moves_by_tm")
                    subset = df.iloc[:, :2]
subset.to_csv(file_name, index=False)
print("Pokemon done:" + name)
time.sleep(pause)
        except Exception:
            print("Parsing error: " + name)
    except Exception:
        print("page not found: " + name)
notFoundList.append(name)
print(notFoundList)
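If I ever need to make the scraper more resilient, the requests could go through a session with automatic retries. A minimal sketch of that idea (the retry settings here are arbitrary, not what the script above uses):

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
# Retry transient failures (rate limits and server errors) with exponential backoff
retries = Retry(total=3, backoff_factor=1, status_forcelist=[429, 500, 502, 503])
session.mount("https://", HTTPAdapter(max_retries=retries))
r = session.get(url, headers={"User-Agent": "table-scraper/1.0"}, timeout=10)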
moves_checker.py: Checks each csv file to make sure the correct data was saved to each one. This was originally needed because I had a less effective version of moves_getter, but checking never hurts.
import pandas as pd
import glob
import os
file_path = "D:/Coding Projects/PokemonCSVSort/pokemon_moves_problem/files/*.csv"
csv_files = glob.glob(file_path)
broken = []
for file in csv_files:
faulty = False
df = pd.read_csv(file)
name = os.path.basename(file)
base, _ = os.path.splitext(name)
words = base.split("_")
if words[-1] == "egg":
if df.columns.tolist()[0] != "EggMove":
pass
#faulty = True
elif words[-1] == "hm":
if df.columns.tolist()[0] != "HM":
faulty = True
elif words[-1] == "tutor":
if df.columns.tolist()[0] != "Tutor":
pass
#faulty = True
    if faulty:
broken.append(words[0])
broken_series = pd.Series(broken, name="Pokemon")
broken_series.to_csv("brokenmoves.csv", index=False)
move-consolidator.py: Iterates through all the csv files and combines them into one large csv. You may wonder why I didn't just do this originally. The simple answer is that I preferred to do these things in steps so I could more easily double-check for errors. There are hundreds of Pokemon and hundreds of moves, so I really didn't want any errors to slip through early on and create big problems later.
import pandas as pd
import glob
import os
file_path = "D:/Coding Projects/PokemonCSVSort/pokemon_moves_problem/files/mark2/*.csv"
csv_files = glob.glob(file_path)
ROW_FORMAT = '{pokemon},"({lvs})","({tms})","({hms})","({eggs})"'
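# The doubled quotes are CSV escapes: "(""a"",""b"")" in the file parses back to the single field ("a","b")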
currentPokemon = ""
count = 0
pokemon = {}
for file in csv_files:
df = pd.read_csv(file)
name = os.path.basename(file)
base, _ = os.path.splitext(name)
words = base.split("_")
currentPokemon = words[0]
currentCol = ""
if words[-1] == "level":
df['move_string'] = df["Lv."].astype(str) + "," + df["Move"]
move_list = df['move_string'].tolist()
currentCol = "LV"
elif words[-1] == "egg":
move_list = df["Move"].tolist()
currentCol = "EGG"
elif words[-1] == "tm":
move_list = df["TM"].tolist()
currentCol = "TM"
elif words[-1] == "hm":
move_list = df["HM"].tolist()
currentCol = "HM"
if currentPokemon not in pokemon:
pokemon[currentPokemon] = {"Pokemon": currentPokemon, "LV": [], "TM": [], "HM": [], "EGG": []}
pokemon[currentPokemon][currentCol].extend(move_list)
fullList = pd.DataFrame(pokemon.values())
with open("D:/Coding Projects/PokemonCSVSort/pokemon_moves_problem/pokemon_moves.csv", 'w') as file:
file.write("Pokemon,LV,TM,HM,EGG\n")
    for mon, moves in pokemon.items():  # "mon" avoids clobbering the pokemon dict mid-loop
# Convert lists to comma-separated strings of quoted values
lvs = ','.join([f'""{move}""' for move in moves["LV"]])
tms = ','.join([f'""{move}""' for move in moves["TM"]])
hms = ','.join([f'""{move}""' for move in moves["HM"]])
eggs = ','.join([f'""{move}""' for move in moves["EGG"]])
# Write formatted row
file.write(ROW_FORMAT.format(
            pokemon=mon,
lvs=lvs,
tms=tms,
hms=hms,
eggs=eggs
) + "\n")
print(fullList)
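The manual quoting works, but the csv module would handle the escaping automatically. A sketch of the same row-writing using it (same names as above):

import csv
with open("pokemon_moves.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["Pokemon", "LV", "TM", "HM", "EGG"])
    for mon, moves in pokemon.items():
        # csv.writer quotes each field and doubles embedded quotes for us
        writer.writerow([mon] + ["(" + ",".join(f'"{m}"' for m in moves[col]) + ")" for col in ["LV", "TM", "HM", "EGG"]])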
dexscraper.py: Another scraper, this one to get the Dex entries that exist for every Pokemon. I had forgotten to grab them earlier.
import io
import re
import requests
import pandas as pd
from bs4 import BeautifulSoup
import time, random
namesFile = "names.csv"
df_names = pd.read_csv(namesFile)
names = df_names["Names"].astype(str).tolist()
URL_FORMAT = "https://pokemondb.net/pokedex/{value}"
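# Only the X and Y game entries get scraped for this project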
target_games = ["X", "Y"]
results = []
count = 0
for name in names:
    url = URL_FORMAT.format(value=name)
try:
r = requests.get(url)
r.raise_for_status()
soup = BeautifulSoup(r.text, "html.parser")
# Find Pokédex entries section
header = soup.find("h2", string="Pokédex entries")
if not header:
print(f"No Pokédex entries for {name}")
continue
table = header.find_next("table")
entries = {game: None for game in target_games}
        for tr in table.find_all("tr"):
            game_cell = tr.find("th")
            entry_cell = tr.find("td")
            # Guard against rows missing a th or td, which would otherwise crash
            if game_cell and entry_cell:
                entry = entry_cell.text.strip()
                games = [g.strip() for g in game_cell.stripped_strings]
                for game in games:
                    if game in entries:
                        entries[game] = entry
# Append only if at least one of the entries exists
if any(entries.values()):
results.append({
"Pokemon": name,
"X": entries["X"],
"Y": entries["Y"]
})
else:
print(name + "skipped")
time.sleep(1)
count += 1
    except Exception:
        print("Error on pokemon: " + name)
output_df = pd.DataFrame(results)
output_df.to_csv("pokedex_entries_XY.csv", index=False)
print(count)
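One fragility worth noting: find("h2", string="Pokédex entries") only matches when that is the tag's exact text. A more forgiving lookup (a sketch) would be:

header = soup.find(lambda tag: tag.name == "h2" and "Pokédex entries" in tag.get_text())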
move_extra_data: Now this is a very interesting one that I won't go into too much detail about here, but I could definitely talk a lot about it if you reach out to me. Basically, moves can have very different effects, and I had to put a lot of thought into how I'd format and program them. This program was to help scrape some of that specific data and output it in a way that could be easily imported into Unreal Engine 5.
import io
import random
import re
import time
from bs4 import BeautifulSoup
import pandas as pd
import requests
final_list = pd.read_csv("D:/Coding Projects/PokemonCSVSort/FinalMerging/ListofMoves(onlydex).csv")
df_moves_list = pd.read_csv("D:/Coding Projects/PokemonCSVSort/FinalMerging/movenames.csv")
df_moves_list["Name"] = df_moves_list["Name"].str.replace(" ", "_", regex=False).str.replace("'", "", regex=False)
moves_list = df_moves_list["Name"].astype(str).tolist()
URL_FORMAT = "https://pokemondb.net/move/{value}"
URL_FORMAT2 = "https://bulbapedia.bulbagarden.net/wiki/{value}_(move)"
notFoundList = []
contactValues = []
pause = random.uniform(1, 2)  # randrange(1, 2) can only ever return 1
#Get MakesContact
def find_contact(r):
soup = BeautifulSoup(r.text, "lxml")
table = soup.find("table", class_="vitals_table")
# Find the "Makes contact?" row
for row in table.find_all("tr"):
th = row.find("th")
td = row.find("td")
        if th and td and "Makes contact?" in th.text:
            text = td.text.strip().lower()
            return text == "yes"
    return None  # explicit: no "Makes contact?" row found
def add_contact():
for name in moves_list:
url = URL_FORMAT.format(value=name)
        try:
            r = requests.get(url, headers={"User-Agent": "table-scraper/1.0"}, timeout=10)
            r.raise_for_status()
try:
result = find_contact(r)
contactValues.append(result)
print("Move done:" + name)
time.sleep(pause)
            except Exception:
                print("Parsing error " + name)
        except requests.RequestException:
            print("page not found: " + name)
notFoundList.append(name)
#Get range
ranges = []
def find_target_type():
for name in moves_list:
url = URL_FORMAT2.format(value=name)
try:
r = requests.get(url)
r.raise_for_status()
soup = BeautifulSoup(r.text, "lxml")
table = soup.find("table", class_="infobox")
for row in table.find_all("tr"):
td = row.find("td")
if td is not None:
b = td.find("b")
                    if b is not None and b.get_text(strip=True) == 'Range':  # str(b) would include the <b> tags and never match
                        rangeTable = td.find("table")
                        tableContents = rangeTable.find_all("small")
                        # Position-dependent: the seventh <small> element held the range label needed here
                        result = tableContents[6].get_text()
                        result = result.split(':')[0].strip()
#print(result)
ranges.append(result)
except Exception:
print("Page not found: " + url)
rangesFixed = []
def fix_target_type():
    # Map the scraped Bulbapedia range labels onto the game's target types
    for rng in ranges_list:  # "rng" avoids shadowing the built-in range()
        if rng == "Normal":
            rangesFixed.append("AnyAdjacent")
        elif rng == "Many Others":
            rangesFixed.append("2Opp")
        elif rng == "1 Random":
            rangesFixed.append("Random")
        elif rng == "All Others":
            rangesFixed.append("AllAdjacent")
        elif rng == "Your Side" or rng == "Your Party":
            rangesFixed.append("PlayerSide")
        elif rng == "Other Side":
            rangesFixed.append("OppSide")
        elif rng == "Both Sides":
            rangesFixed.append("All")
        elif rng == "1 Other":
            rangesFixed.append("Varies")
        else:
            rangesFixed.append(rng)
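# The scraping passes above (add_contact, find_target_type) were run separately;
# below, the ranges already saved to moveRangesTemp.csv just get re-mapped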
rangesFull = pd.read_csv("D:/Coding Projects/PokemonCSVSort/FinalMerging/moveRangesTemp.csv")
ranges_list = rangesFull["MoveRanges"].astype(str).tolist()
fix_target_type()
finalFINALlist = pd.read_csv("D:/Coding Projects/PokemonCSVSort/FinalMerging/ListofMovesExtra.csv")
finalFINALlist['TargetType'] = rangesFixed
finalFINALlist.to_csv("D:/Coding Projects/PokemonCSVSort/FinalMerging/MoveListFINAL.csv", index=False)
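The same label mapping, written as a dictionary so it's easier to scan and extend (a sketch using the names above):

RANGE_MAP = {
    "Normal": "AnyAdjacent",
    "Many Others": "2Opp",
    "1 Random": "Random",
    "All Others": "AllAdjacent",
    "Your Side": "PlayerSide",
    "Your Party": "PlayerSide",
    "Other Side": "OppSide",
    "Both Sides": "All",
    "1 Other": "Varies",
}
rangesFixed = [RANGE_MAP.get(r, r) for r in ranges_list]  # unmapped labels pass through unchanged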
main.py: So despite this one being the first I made... it's at the end for a reason. It's a mess. You can basically treat every method as its own program; each one exists just to fix some other problem. Why did these problems exist? Because my only method for getting this data on a large scale was to download tables off of wikis, and some of those tables have strange formatting, include data I don't need, or are missing data I do need. This program is such a mess because, as I said, it was the first one I made: I wasn't very experienced with this kind of data processing and didn't know how much of it I'd need to do. So this is really just here to show how far I've come.
import pandas as pd
import glob
import os
def fix_gender_ratio():
file_path = "C:/Users/Emily/Desktop/PokemonGroups/*.csv"
csv_files = glob.glob(file_path)
# Dictionary to store data by Pokémon
pokemon_data = {}
# Process each file
for file in csv_files:
# Extract gender ratio from filename
gender_ratio = os.path.splitext(os.path.basename(file))[0].replace('_', '/')
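        # e.g. a file named "87.5_12.5.csv" (hypothetical) yields the ratio string "87.5/12.5"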
# Read the CSV
df = pd.read_csv(file)
# Assign the gender ratio to each Pokémon
for pokemon in df["Pokemon"]:
if pokemon not in pokemon_data:
pokemon_data[pokemon] = []
pokemon_data[pokemon].append(gender_ratio)
# Convert to DataFrame
pokemon_df = pd.DataFrame([
{"Pokemon": pokemon, "Gender Ratios": ', '.join(sorted(set(ratios)))}
for pokemon, ratios in pokemon_data.items()
])
# Sort alphabetically by Pokémon name
pokemon_df = pokemon_df.sort_values("Pokemon").reset_index(drop=True)
print(pokemon_df)
pokemon_df.to_csv("fixedgenders.csv", index=False)
def fixheight():
file_path = "C:/Users/Emily/Desktop/pokemonheightweight.csv"
df = pd.read_csv(file_path)
df["DexBad"] = df["DexBad"].str.extract(r'(\d+)')
df.to_csv("fixedheights.csv", index=False)
def again():
file_path = "C:/Users/Emily/Desktop/fixedheights.csv"
df = pd.read_csv(file_path)
df[["Type 1", "Type 2"]] = df["Type"].str.split(" ", expand=True)
df = df.drop(columns=["Type"])
df.to_csv("fixedheightsAGAIN.csv",index=False)
def fixAbilities():
file_path = "C:/Users/Emily/Desktop/pokemonabilities/*.csv"
csv_files = glob.glob(file_path)
dfs = {file: pd.read_csv(file) for file in csv_files}
for filename, df in dfs.items():
print(f"\n{filename}:\n", df.head())
merged_df = pd.concat(dfs, ignore_index=True)
merged_df["#"] = pd.to_numeric(merged_df["#"], errors='coerce')
# Sort by Dex Number
merged_df = merged_df.sort_values("#").reset_index(drop=True)
merged_df.to_csv("fixedabilities.csv", index=False)
def miniMerge():
file_path = "C:/Users/Emily/Desktop/MergingFiles/sm/*.csv"
csv_files = glob.glob(file_path)
dfs = {file: pd.read_csv(file) for file in csv_files}
merged_df = list(dfs.values())[0]
for df in list(dfs.values())[1:]:
merged_df = pd.merge(merged_df, df, on=["Pokémon"], how="outer")
merged_df["Dex"] = pd.to_numeric(merged_df["Dex"], errors='coerce')
merged_df = merged_df.dropna(subset=["Dex"])
merged_df["Dex"] = merged_df["Dex"].astype(int)
merged_df = merged_df.sort_values("Dex").reset_index(drop=True)
merged_df.to_csv("statsandgender.csv", index=False)
def bigMerge():
file_path = "C:/Users/Emily/Desktop/MergingFiles/*.csv"
csv_files = glob.glob(file_path)
dfs = {file: pd.read_csv(file) for file in csv_files}
merged_df = list(dfs.values())[0]
# Merge all remaining DataFrames
for df in list(dfs.values())[1:]:
merged_df = pd.merge(merged_df, df, on=["Dex","Pokémon"], how="outer")
    #Remove empty dex entries
merged_df = merged_df.fillna("None")
#Remove pokemon outside of scope of game
merged_df = merged_df[merged_df["Dex"] <= 721]
#Remove more pokemon outside of scope
mask = ~merged_df["Pokémon"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = merged_df[mask].reset_index(drop=True)
column_order = ["Dex", "Pokémon", "Type 1", "Type 2", "HP", "Attack", "Defense", "Sp. Attack", "Sp. Defense", "Speed", "Total", "Average", "Ability 1", "Ability 2", "Hidden", "Height (m)", "Height (ft)", "Weight (kgs)", "Weight (lbs)", "Catch rate", "Egg Group 1", "Egg Group 2", "Cycles", "Steps", "Experience type", "BaseFriendship", "Gender Ratios", "Forms"]
filtered_df = filtered_df.reindex(columns=column_order)
filtered_df.to_csv("pokemon_bigmerge.csv", index=False)
def add_EVes():
file_path = "C:/Users/Emily/Desktop/*.csv"
csv_files = glob.glob(file_path)
dfs = {file: pd.read_csv(file) for file in csv_files}
for filename, df in dfs.items():
print(f"\n{filename}:\n", df.head())
merged_df = list(dfs.values())[0]
for df in list(dfs.values())[1:]:
merged_df = pd.merge(merged_df, df, on=["Dex","Pokémon"], how="outer")
merged_df = merged_df.fillna("None")
column_order = ["Dex", "Pokémon", "Type 1", "Type 2", "HP", "Attack", "Defense", "Sp. Attack", "Sp. Defense", "Speed", "Total", "Average", "Ability 1", "Ability 2", "Hidden", "Exp.", "EVHP", "EVAttack", "EVDefense", "EVSp. Attack", "EVSp. Defense", "EVSpeed", "Total EVs", "Height (m)", "Height (ft)", "Weight (kgs)", "Weight (lbs)", "Catch rate", "Egg Group 1", "Egg Group 2", "Cycles", "Steps", "Experience type", "BaseFriendship", "Gender Ratios", "Forms"]
merged_df = merged_df.reindex(columns=column_order)
merged_df.to_csv("pokemon_addedEVes.csv", index=False)
def lvl_evolve_cleanup():
file_path = "C:/Users/Emily/Desktop/pokemonlevelevolve.csv"
df = pd.read_csv(file_path)
mask = ~df["Pokemon"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = df[mask].reset_index(drop=True)
mask = ~filtered_df["Evolves into"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = filtered_df[mask].reset_index(drop=True)
    matches = filtered_df.index[filtered_df["Pokemon"] == "Rowlet Rowlet"]
    if len(matches) > 0:  # indexing [0] directly would raise if the row were missing
        pos = filtered_df.index.get_loc(matches[0])
        filtered_df = filtered_df.iloc[:pos].reset_index(drop=True)
    # Collapse doubled names like "Bulbasaur Bulbasaur" into a single name
    filtered_df["Pokemon"] = filtered_df["Pokemon"].str.replace(r"^(.+?)\s+\1$", r"\1", regex=True)
    filtered_df["Evolves into"] = filtered_df["Evolves into"].str.replace(r"^(.+?)\s+\1$", r"\1", regex=True)
filtered_df = filtered_df.fillna("None")
filtered_df.to_csv("fixedLevelEvolutions.csv", index=False)
def item_evolve_cleanup():
file_path = "C:/Users/Emily/Desktop/pokemonItemEvolve.csv"
df = pd.read_csv(file_path)
mask = ~df["Pokemon"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = df[mask].reset_index(drop=True)
mask = ~filtered_df["Evolves into"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = filtered_df[mask].reset_index(drop=True)
    matches = filtered_df.index[filtered_df["Pokemon"] == "Crabrawler Crabrawler"]
    if len(matches) > 0:
        pos = filtered_df.index.get_loc(matches[0])
        filtered_df = filtered_df.iloc[:pos].reset_index(drop=True)
filtered_df["Pokemon"] = filtered_df["Pokemon"].str.replace(r"^(.+?)\s+\1$", r"\1", regex=True)
filtered_df["Evolves into"] = filtered_df["Evolves into"].str.replace(r"^(.+?)\s+\1$", r"\1", regex=True)
filtered_df = filtered_df.fillna("None")
filtered_df.to_csv("fixedItemEvolutions.csv", index=False)
def trade_evolve_cleanup():
file_path = "C:/Users/Emily/Desktop/pokemonTradeEvolve.csv"
df = pd.read_csv(file_path)
mask = ~df["Pokemon"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = df[mask].reset_index(drop=True)
mask = ~filtered_df["Evolves into"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = filtered_df[mask].reset_index(drop=True)
filtered_df["Pokemon"] = filtered_df["Pokemon"].str.replace(r"^(.+?)\s+\1$", r"\1", regex=True)
filtered_df["Evolves into"] = filtered_df["Evolves into"].str.replace(r"^(.+?)\s+\1$", r"\1", regex=True)
filtered_df = filtered_df.fillna("None")
filtered_df.to_csv("fixedTradeEvolutions.csv", index=False)
def friend_evolve_cleanup():
file_path = "C:/Users/Emily/Desktop/pokemonFriendEvolve.csv"
df = pd.read_csv(file_path)
mask = ~df["Pokemon"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = df[mask].reset_index(drop=True)
mask = ~filtered_df["Evolves into"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = filtered_df[mask].reset_index(drop=True)
filtered_df["Pokemon"] = filtered_df["Pokemon"].str.replace(r"^(.+?)\s+\1$", r"\1", regex=True)
filtered_df["Evolves into"] = filtered_df["Evolves into"].str.replace(r"^(.+?)\s+\1$", r"\1", regex=True)
filtered_df = filtered_df.fillna("None")
filtered_df.to_csv("fixedFriendEvolutions.csv", index=False)
def other_evolve_cleanup():
file_path = "C:/Users/Emily/Desktop/pokemonOtherEvolve.csv"
df = pd.read_csv(file_path)
mask = ~df["Pokemon"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = df[mask].reset_index(drop=True)
mask = ~filtered_df["Evolves into"].str.contains(r"Hisuian|Galarian|Alolan", case=False, na=False)
filtered_df = filtered_df[mask].reset_index(drop=True)
    matches = filtered_df.index[filtered_df["Pokemon"] == "Yungoos Yungoos"]
    if len(matches) > 0:
        pos = filtered_df.index.get_loc(matches[0])
        filtered_df = filtered_df.iloc[:pos].reset_index(drop=True)
filtered_df["Pokemon"] = filtered_df["Pokemon"].str.replace(r"^(.+?)\s+\1$", r"\1", regex=True)
filtered_df["Evolves into"] = filtered_df["Evolves into"].str.replace(r"^(.+?)\s+\1$", r"\1", regex=True)
filtered_df = filtered_df.fillna("None")
filtered_df.to_csv("fixedOtherEvolutions.csv", index=False)
def fix_friendship():
#First need to fix friendship file to align with others
df = pd.read_csv("fixedFriendEvolutions.csv")
    df.insert(2, "Friendship", "Friendship")  # constant "method" column so this file lines up with the other evolution files
df.to_csv("doublefixedfriendshipevolve.csv", index=False)
def merge_evolve():
csv_files = glob.glob("C:/Users/Emily/Desktop/allevolutions/*.csv")
normalized = []
for path in csv_files:
df = pd.read_csv(path)
# find the one special column in this file:
fixed_cols = {"Pokemon", "Evolves into", "Evolve Condition"}
special_cols = [c for c in df.columns if c not in fixed_cols]
special = special_cols[0]
# ensure there's an Evolve Condition column (fill with NA if missing)
if "Evolve Condition" not in df.columns:
df["Evolve Condition"] = pd.NA
        # keep only the four columns, renaming the special one to "Evolve Method"
df = df[["Pokemon", "Evolves into", special, "Evolve Condition"]]
df = df.rename(columns={ special: "Evolve Method" })
normalized.append(df)
big = pd.concat(normalized, ignore_index=True)
def collapse(vals):
        # drop NA, dedupe, then comma-join
vs = vals.dropna().astype(str).unique()
return ", ".join(vs)
final = (
big
.groupby(["Pokemon","Evolves into"], as_index=False)
.agg({
"Evolve Method": collapse,
"Evolve Condition": collapse
}))
final.to_csv("merged_evolutions.csv", index=False)
def add_dex_to_evos():
    df_evos = pd.read_csv("C:/Users/Emily/Desktop/merged_evolutions.csv")
    # merge_evolve saved the unaccented "Pokemon" header, so align it with the big merge before joining
    df_evos = df_evos.rename(columns={"Pokemon": "Pokémon"})
    df_dex = pd.read_csv("pokemon_bigmerge.csv")
    df_merged = df_evos.merge(df_dex[["Pokémon", "Dex"]], on="Pokémon", how="left")
column_order = ["Dex", "Pokémon", "Evolves into","Evolve Method","Evolve Condition"]
df_merged = df_merged.reindex(columns=column_order)
df_merged = df_merged.sort_values("Dex").reset_index(drop=True)
df_merged.to_csv("evoswithdexTEST.csv", index=False)
df = pd.read_csv("evoswithdexTEST.csv")
df["Dex"] = df["Dex"].astype(int)
df = df.sort_values("Dex").reset_index(drop=True)
df.to_csv("evoswithdex.csv", index=False)