|
|
|
"""
|
|
|
|
This file finalizes the GAN process by performing the following tasks:
|
|
|
|
- basic stats & analytics
|
|
|
|
- rebuild io to another dataset
|
|
|
|
"""
|
|
|
|
import pandas as pd
|
|
|
|
import numpy as np
|
|
|
|
from google.oauth2 import service_account
|
|
|
|
from google.cloud import bigquery as bq
|
|
|
|
from data.params import SYS_ARGS
|
|
|
|
import json
|
|
|
|
class Analytics :
    """
    Compile basic analytics about a given dataset, i.e. compare original/synthetic frames.

    Columns whose name ends in '_io' are treated as synthetic-output mirrors and are
    excluded from the feature-name lists (presumably by project convention — the
    '_io' suffix appears again in the commented BigQuery example at the bottom of
    this file).
    """

    @staticmethod
    def distribution(**args):
        """
        Compute per-column value-frequency distributions for a data-frame.

        :context    label of the run/table; stored as an extra column holding the value index
        :data       pandas DataFrame to profile
        :return     DataFrame indexed by observed values, one column per input column with
                    occurrence counts (0 where a value never occurs in that column), plus
                    a `context` column carrying the value index.
        """
        context = args['context']
        df = args['data']
        #
        # -- Unique-value counts per feature (the "space" of each column).
        # NOTE: the previous pd.DataFrame(series, columns=['counts']) call produced an
        # all-NaN column because `columns` acts as a selector for Series input;
        # to_frame keeps the real counts while preserving the 'counts' row label.
        df_counts = df.apply(lambda col: col.unique().size).to_frame(name='counts').T
        #
        # -- Feature names, excluding synthetic '_io' mirror columns.
        names = [name for name in df_counts.columns.tolist() if not name.endswith('_io')]
        #
        # -- Occurrence counts of each value, per column; absent values filled with 0.
        ddf = df.apply(lambda col: pd.DataFrame(col.values, columns=[col.name]).groupby([col.name]).size()).fillna(0)
        ddf[context] = ddf.index
        # The original version discarded all of the above (ended with `pass`);
        # returning the distribution frame makes the computation usable.
        return ddf

    @staticmethod
    def distance(**args):
        """
        Measure the distance between original and synthetic distributions.

        NOTE(review): the original body referenced an undefined `df_counts`
        (guaranteed NameError); it is now derived from the input frame exactly as
        in `distribution`. The actual distance metric is still unimplemented —
        for now this returns the non-'_io' feature names the metric would cover.

        :data       pandas DataFrame to analyze
        :return     list of column names not ending in '_io'
        """
        df = args['data']
        df_counts = df.apply(lambda col: col.unique().size).to_frame(name='counts').T
        names = [name for name in df_counts.columns.tolist() if not name.endswith('_io')]
        return names
|
|
|
|
class Utils :
    """
    Helper namespace for the finalization step (configuration lookup and SQL assembly).
    """
    class get :
        @staticmethod
        def config(**args) :
            """
            Select the pipeline entries that belong to the requested contexts.

            :contexts   comma-separated string of context names, or an iterable of names
            :pipeline   list of dict entries, each carrying a 'context' key
            :return     the pipeline entries whose 'context' is in `contexts`
            """
            # isinstance instead of `type(...) == str` — also accepts str subclasses.
            contexts = args['contexts'].split(',') if isinstance(args['contexts'], str) else args['contexts']
            pipeline = args['pipeline']
            return [item for item in pipeline if item['context'] in contexts]
|
|
|
|
        @staticmethod
        def sql(**args) :
            """
            Build a SQL query for the remainder of the table that was NOT synthesized.

            Each configured filter's qualifier is inverted (IN <-> NOT IN, = <-> <>)
            so the query selects the complement of the synthesized rows.

            :config     configuration entries
            :from       source of the table name (the ':from' placeholder in the SELECT)
            :dataset    name of the source dataset

            NOTE(review): the visible body stops after computing QUALIFIER — the
            filter is never appended to SQL_FILTER and nothing is returned. This
            chunk appears truncated; confirm against the full file before relying
            on this function.
            """
            # ':from' is a placeholder, presumably substituted by the caller — TODO confirm.
            SQL = ["SELECT * FROM :from "]
            SQL_FILTER = []
            NO_FILTERS_FOUND = True   # flips to False once the first WHERE clause is emitted
            # Restrict the pipeline to the requested contexts.
            pipeline = Utils.get.config(**args)
            # Inversion table: select the rows the synthesis filters excluded.
            REVERSE_QUALIFIER = {'IN':'NOT IN','NOT IN':'IN','=':'<>','<>':'='}
            for item in pipeline :
                if 'filter' in item :
                    if NO_FILTERS_FOUND :
                        NO_FILTERS_FOUND = False
                        SQL += ['WHERE']
                    #
                    # Let us load the filter in the SQL Query
                    FILTER = item['filter']
                    QUALIFIER = REVERSE_QUALIFIER[FILTER['qualifier'].upper()]
|
|
|
|
|
|
|
|
import numpy as np
|
|
|
|
from google.oauth2 import service_account
|
|
|
|
import json
|
|
|
|
|
|
|
|
# path = '../curation-prod.json'
|
|
|
|
# credentials = service_account.Credentials.from_service_account_file(path)
|
|
|
|
# df = pd.read_gbq("SELECT * FROM io.icd10_partial_io",credentials=credentials,dialect='standard')
|