From 18bfa63df1290d95ed27a2c7da1a62f99ff50eef Mon Sep 17 00:00:00 2001
From: "Steve L. Nyemba -- The Architect"
Date: Tue, 18 Sep 2018 18:54:17 -0500
Subject: [PATCH] experimental design (notebook)

---
 notebooks/risk.ipynb | 319 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 319 insertions(+)
 create mode 100644 notebooks/risk.ipynb

"cell_type": "code", + "execution_count": 85, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(u'care_site',\n", + " u'concept',\n", + " u'concept_ancestor',\n", + " u'concept_class',\n", + " u'concept_relationship',\n", + " u'concept_synonym',\n", + " u'condition_occurrence',\n", + " u'criteria',\n", + " u'death',\n", + " u'device_exposure',\n", + " u'domain',\n", + " u'drug_exposure',\n", + " u'drug_strength',\n", + " u'location',\n", + " u'measurement',\n", + " u'note',\n", + " u'observation',\n", + " u'people_seed',\n", + " u'person',\n", + " u'procedure_occurrence',\n", + " u'relationship',\n", + " u'visit_occurrence',\n", + " u'vocabulary')" + ] + }, + "execution_count": 85, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#\n", + "# find every table with person id at the very least or a subset of fields\n", + "#\n", + "def get_tables\n", + "q = ['person_id']\n", + "pairs = list(itertools.combinations(names,len(names)))\n", + "pairs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 90, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['a']" + ] + }, + "execution_count": 90, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list(set(['a','b']) & set(['a']))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.15rc1" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/risk.ipynb b/notebooks/risk.ipynb new file mode 100644 index 0000000..fc86de5 --- /dev/null +++ b/notebooks/risk.ipynb @@ -0,0 +1,319 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 66, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import numpy as np\n", + "from google.cloud import bigquery as bq\n", + "\n", + "client = bq.Client.from_service_account_json('/home/steve/dev/google-cloud-sdk/accounts/vumc-test.json')" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "xo = ['person_id','date_of_birth','race']\n", + "xi = ['person_id','value_as_number','value_source_value']" + ] + }, + { + "cell_type": "code", + "execution_count": 181, + "metadata": {}, + "outputs": [], + "source": [ + "def get_tables(client,id,fields=[]):\n", + " \"\"\"\n", + " getting table lists from google\n", + " \"\"\"\n", + " r = []\n", + " ref = client.dataset(id)\n", + " tables = list(client.list_tables(ref))\n", + " for table in tables :\n", + " ref = table.reference\n", + " schema = client.get_table(ref).schema\n", + " names = [f.name for f in schema]\n", + " x = list(set(names) & set(fields))\n", + " if x :\n", + " r.append({\"name\":table.table_id,\"fields\":names})\n", + " return r\n", + " \n", + "def get_fields(**args):\n", + " \"\"\"\n", + " This function will generate a random set of fields from two tables. 
+    "        {name, fields:[], y:<join field>}, with \n",
+    "            name    table name (needed to generate the sql query)\n",
+    "            fields  list of field names, used in the projection\n",
+    "            y       name of the field to join on\n",
+    "        @param xo   candidate table in the join\n",
+    "        @param xi   candidate table (or list of tables) in the join\n",
+    "        @param join field by which the tables can be joined\n",
+    "    \"\"\"\n",
+    "    # The set operation will remove redundancies in the field names (not sure it's a good idea)\n",
+    "#     xo = args['xo']['fields']\n",
+    "#     xi = args['xi']['fields']\n",
+    "#     zi = args['xi']['name']\n",
+    "#     return list(set([ \".\".join([args['xo']['name'],name]) for name in xo]) | set(['.'.join([args['xi']['name'],name]) for name in xi if name != args['join']]) )\n",
+    "    xo = args['xo']\n",
+    "    fields = [\".\".join([args['xo']['name'],name]) for name in args['xo']['fields']]\n",
+    "    if not isinstance(args['xi'],list) :\n",
+    "        x_ = [args['xi']]\n",
+    "    else:\n",
+    "        x_ = args['xi']\n",
+    "    for xi in x_ :\n",
+    "        fields += (['.'.join([xi['name'],name]) for name in xi['fields'] if name != args['join']])\n",
+    "    return fields\n",
+    "def generate_sql(**args):\n",
+    "    \"\"\"\n",
+    "        Generate the SQL for joining xo with the table(s) in xi; the :fields placeholder is left for the caller to fill in (e.g. from get_fields)\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    xo = args['xo']\n",
+    "    x_ = args['xi']\n",
+    "    xo_name = \".\".join([args['prefix'],xo['name'] ]) if 'prefix' in args else xo['name']\n",
+    "    SQL = \"SELECT :fields FROM :xo.name \".replace(\":xo.name\",xo_name)\n",
+    "    if not isinstance(x_,list):\n",
+    "        x_ = [x_]\n",
+    "    f = []#[\".\".join([args['xo']['name'],args['join']] )] \n",
+    "    INNER_JOINS = []\n",
+    "    for xi in x_ :\n",
+    "        xi_name = \".\".join([args['prefix'],xi['name'] ]) if 'prefix' in args else xi['name']\n",
+    "        JOIN_SQL = \"INNER JOIN :xi.name ON \".replace(':xi.name',xi_name)\n",
+    "        value = \".\".join([xi['name'],args['join']])\n",
+    "        f.append(value) \n",
+    "        \n",
+    "        ON_SQL = \"\"\n",
+    "        tmp = []\n",
+    "        for term in f :\n",
+    "            ON_SQL = \":xi.name.:ofield = :xo.name.:ofield\".replace(\":xo.name\",xo['name'])\n",
+    "            ON_SQL = ON_SQL.replace(\":xi.name.:ofield\",term).replace(\":ofield\",args['join'])\n",
+    "            tmp.append(ON_SQL)\n",
+    "        INNER_JOINS += [JOIN_SQL + \" AND \".join(tmp)]\n",
+    "    return SQL + \" \".join(INNER_JOINS)\n",
+    "    \n",
+    "#     sql = \"SELECT :fields FROM :xo.name INNER JOIN :xi.name ON :xi.name.:xi.y = :xo.y \"\n",
+    "#     fields = \",\".join(get_fields(xo=xi,xi=xi,join=xi['y']))\n",
+    "    \n",
+    "    \n",
+    "#     sql = sql.replace(\":fields\",fields).replace(\":xo.name\",xo['name']).replace(\":xi.name\",xi['name'])\n",
+    "#     sql = sql.replace(\":xi.y\",xi['y']).replace(\":xo.y\",xo['y'])\n",
+    "#     return sql\n",
+    "    \n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 183,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'SELECT :fields FROM raw.person INNER JOIN raw.measurement ON measurement.person_id = person.person_id'"
+      ]
+     },
+     "execution_count": 183,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "xo = {\"name\":\"person\",\"fields\":['person_id','date_of_birth','race']}\n",
+    "xi = [{\"name\":\"measurement\",\"fields\":['person_id','value_as_number','value_source_value']}] #,{\"name\":\"observation\",\"fields\":[\"person_id\",\"value_as_string\",\"observation_source_value\"]}]\n",
+    "generate_sql(xo=xo,xi=xi,join=\"person_id\",prefix='raw')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 55,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'SELECT person_id,value_as_number,measurements.value_source_value,measurements.value_as_number,value_source_value FROM person INNER JOIN measurements ON measurements.person_id = person_id '"
+      ]
+     },
+     "execution_count": 55,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "xo = {\"name\":\"person\",\"fields\":['person_id','date_of_birth','race'],\"y\":\"person_id\"}\n",
+    "xi = {\"name\":\"measurements\",\"fields\":['person_id','value_as_number','value_source_value'],\"y\":\"person_id\"}\n",
+    "generate_sql(xo=xo,xi=xi)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 59,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[('a', 'b'), ('a', 'c'), ('b', 'c')]"
+      ]
+     },
+     "execution_count": 59,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "\"\"\"\n",
+    "    We are designing a process that will take two tables and generate the join between them\n",
+    "\"\"\"\n",
+    "import itertools\n",
+    "list(itertools.combinations(['a','b','c'],2))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 111,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[u'condition_occurrence.condition_occurrence_id',\n",
+       " u'condition_occurrence.person_id',\n",
+       " u'condition_occurrence.condition_concept_id',\n",
+       " u'condition_occurrence.condition_start_date',\n",
+       " u'condition_occurrence.condition_start_datetime',\n",
+       " u'condition_occurrence.condition_end_date',\n",
+       " u'condition_occurrence.condition_end_datetime',\n",
+       " u'condition_occurrence.condition_type_concept_id',\n",
+       " u'condition_occurrence.stop_reason',\n",
+       " u'condition_occurrence.provider_id',\n",
+       " u'condition_occurrence.visit_occurrence_id',\n",
+       " u'condition_occurrence.condition_source_value',\n",
+       " u'condition_occurrence.condition_source_concept_id',\n",
+       " u'death.death_date',\n",
+       " u'death.death_datetime',\n",
+       " u'death.death_type_concept_id',\n",
+       " u'death.cause_concept_id',\n",
+       " u'death.cause_source_value',\n",
+       " u'death.cause_source_concept_id',\n",
+       " u'device_exposure.device_exposure_id',\n",
+       " u'device_exposure.device_concept_id',\n",
+       " u'device_exposure.device_exposure_start_date',\n",
+       " u'device_exposure.device_exposure_start_datetime',\n",
+       " u'device_exposure.device_exposure_end_date',\n",
+       " u'device_exposure.device_exposure_end_datetime',\n",
+       " u'device_exposure.device_type_concept_id',\n",
+       " u'device_exposure.unique_device_id',\n",
+       " u'device_exposure.quantity',\n",
+       " u'device_exposure.provider_id',\n",
+       " u'device_exposure.visit_occurrence_id',\n",
+       " u'device_exposure.device_source_value',\n",
+       " u'device_exposure.device_source_concept_id',\n",
+       " u'drug_exposure.drug_exposure_id',\n",
+       " u'drug_exposure.drug_concept_id',\n",
+       " u'drug_exposure.drug_exposure_start_date',\n",
+       " u'drug_exposure.drug_exposure_start_datetime',\n",
+       " u'drug_exposure.drug_exposure_end_date',\n",
+       " u'drug_exposure.drug_exposure_end_datetime',\n",
+       " u'drug_exposure.drug_type_concept_id',\n",
+       " u'drug_exposure.stop_reason',\n",
+       " u'drug_exposure.refills',\n",
+       " u'drug_exposure.quantity',\n",
+       " u'drug_exposure.days_supply',\n",
+       " u'drug_exposure.sig',\n",
+       " u'drug_exposure.route_concept_id',\n",
+       " u'drug_exposure.effective_drug_dose',\n",
+       " u'drug_exposure.dose_unit_concept_id',\n",
+       " u'drug_exposure.lot_number',\n",
+       " u'drug_exposure.provider_id',\n",
+       " u'drug_exposure.visit_occurrence_id',\n",
+       " u'drug_exposure.drug_source_value',\n",
+       " u'drug_exposure.drug_source_concept_id',\n",
+       " u'drug_exposure.route_source_value',\n",
+       " u'drug_exposure.dose_unit_source_value']"
+      ]
+     },
+     "execution_count": 111,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "#\n",
+    "# find every table with person id at the very least or a subset of fields\n",
+    "#\n",
+    "info = get_tables(client,'raw',['person_id'])\n",
+    "# get_fields(xo=names[0],xi=names[1:4],join='person_id')\n",
+    "\n",
+    "# q = ['person_id']\n",
+    "# pairs = list(itertools.combinations(names,len(names)))\n",
+    "# pairs[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 90,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['a']"
+      ]
+     },
+     "execution_count": 90,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "list(set(['a','b']) & set(['a']))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 120,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "x_ = 1"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 2",
+   "language": "python",
+   "name": "python2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython2",
+   "version": "2.7.15rc1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
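
Note, not part of the patch: a minimal, self-contained sketch of the idea risk.ipynb is experimenting with, namely enumerating candidate table pairs that share a join key and composing the corresponding INNER JOIN statement. The table descriptors and the join_sql helper below are illustrative assumptions, not the notebook's actual get_fields/generate_sql code nor live BigQuery schemas.

import itertools

# toy table descriptors (assumed for illustration; the notebook derives these from get_tables)
tables = [
    {"name": "person", "fields": ["person_id", "date_of_birth", "race"]},
    {"name": "measurement", "fields": ["person_id", "value_as_number", "value_source_value"]},
    {"name": "observation", "fields": ["person_id", "value_as_string"]},
]

def join_sql(xo, xi, join, prefix=None):
    # project every field of xo plus the non-key fields of xi, then join on the shared key
    fields = [".".join([xo["name"], f]) for f in xo["fields"]]
    fields += [".".join([xi["name"], f]) for f in xi["fields"] if f != join]
    qualify = lambda t: ".".join([prefix, t["name"]]) if prefix else t["name"]
    return "SELECT %s FROM %s INNER JOIN %s ON %s.%s = %s.%s" % (
        ",".join(fields), qualify(xo), qualify(xi), xi["name"], join, xo["name"], join)

for xo, xi in itertools.combinations(tables, 2):
    print(join_sql(xo, xi, join="person_id", prefix="raw"))

Each generated statement has the same shape as the execution_count 183 result above, with the projection list filled in rather than the :fields placeholder.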