@@ -88,6 +88,10 @@ class Cube:
}
)
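+ # dimensionShortName: dimensionName with all spaces removed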
+ self.dimensions["dimensionShortName"] = self.dimensions[
+ "dimensionName"
+ ].str.replace(r" ", "", regex=True)
+
self.hierarchies.rename(
columns={
"CATALOG_NAME": "catalogName",
@@ -152,6 +156,10 @@ class Cube:
}
)
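+ # Derive hierarchyShortName from hierarchyName via the shared data.convert_to_camel helper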
+ self.hierarchies = data.convert_to_camel(
+ self.hierarchies, {"hierarchyName": "hierarchyShortName"}
+ )
+
self.measures.rename(
columns={
"CATALOG_NAME": "catalogName",
@@ -202,6 +210,10 @@ class Cube:
}
)
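+ # Derive measureShortName from measureName via data.convert_to_camel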
+ self.measures = data.convert_to_camel(
+ self.measures, {"measureName": "measureShortName"}
+ )
+
# ADO data type mapping should be set to python/pandas
ado_type_map = {
2: "int64",
@@ -269,6 +281,18 @@ class Cube:
}
)
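+ # Build short-name columns on relationships: strip MDX brackets, then remove spaces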
+ self.relationships["dimensionShortName"] = self.relationships[
+ "dimensionUniqueName"
+ ].str.replace(r"(\[|\])", "", regex=True)
+
+ self.relationships["dimensionShortName"] = self.dimensions[
+ "dimensionShortName"
+ ].str.replace(r" ", "", regex=True)
+
+ self.relationships["measuregroupName"] = self.dimensions[
+ "measuregroupShortName"
+ ].str.replace(r" ", "", regex=True)
+
@admin.initializor
def save_file(data: pd.DataFrame, table: str) -> None:
@@ -286,7 +310,7 @@ def save_file(data: pd.DataFrame, table: str) -> None:
"""
# Initialize ade instance, checking for database locations of
# server or cache.
- root = admin.instance.server
+ root = admin.INSTANCE.server
# tables[0] = ade file name
# tables[1] = If the table includes an index that will also be outputted
@@ -304,7 +328,7 @@ def save_file(data: pd.DataFrame, table: str) -> None:
saves = {
"national": [root / f"National Accounts/reference/{tables[table][0]}"],
- "general": [admin.instance.reference / f"{tables[table][0]}"],
+ "general": [admin.INSTANCE.reference / f"{tables[table][0]}"],
}
suffixes = [".csv"]
methods = ["to_csv"]
@@ -742,7 +766,7 @@ def cube_schema(
"""
servers: dict[str, list[str]] = {
- key: value for key, value in admin.instance.conf["cube"].items()
+ key: value for key, value in admin.INSTANCE.conf["cube"].items()
}
if database:
@@ -851,11 +875,11 @@ def sql_schema(
)
df.nullable = np.where(df.nullable == "NO", 0, 1)
- df = data.convert_pascal(df, cols={"columnName": "columnCamel"})
+ df = data.convert_to_camel(df, cols={"columnName": "columnCamel"})
# Manual override
schema_override = pd.read_csv(
- admin.instance.reference / "schema_override.csv", dtype="string"
+ admin.INSTANCE.reference / "schema_override.csv", dtype="string"
)
df = df.merge(
schema_override,
@@ -1493,7 +1517,7 @@ class Headcount(pd.core.frame.DataFrame):
# Works on new employees if there are any
if not new_employees.empty:
- admin.instance.logger.info("Adding new employees.")
+ admin.INSTANCE.logger.info("Adding new employees.")
# Checks manager hierarchy in old table and if one of the new manager pairs
# is already labeled as Miscoded (ie labeled as a non-tech employee), the
@@ 1508,7 1532,7 @@ class Headcount(pd.core.frame.DataFrame):
False,
)
else:
- admin.instance.logger.info(
+ admin.INSTANCE.logger.info(
"No new employees to add, skipping to management."
)
@@ -101,7 +101,7 @@ def convert_decimal(
return df
-def convert_pascal(df: pd.DataFrame, cols: dict[str, str]) -> pd.DataFrame:
+def convert_to_camel(df: pd.DataFrame, cols: dict[str, str]) -> pd.DataFrame:
"""Converts columns of PascalText to camelText.
Parameters