{ "cells": [ { "cell_type": "code", "execution_count": 66, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "from google.cloud import bigquery as bq\n", "\n", "client = bq.Client.from_service_account_json('/home/steve/dev/google-cloud-sdk/accounts/vumc-test.json')" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [], "source": [ "xo = ['person_id','date_of_birth','race']\n", "xi = ['person_id','value_as_number','value_source_value']" ] }, { "cell_type": "code", "execution_count": 53, "metadata": {}, "outputs": [], "source": [ "def get_tables(client,did,fields=[]):\n", " \"\"\"\n", " getting table lists from google\n", " \"\"\"\n", " r = []\n", " ref = client.dataset(id)\n", " tables = list(client.list_tables(ref))\n", " for table in tables :\n", " ref = table.reference\n", " schema = client.get_table(ref).schema\n", " names = [f.field_name for f in schema]\n", " x = list(set(names) & set(fields))\n", " if x :\n", " r.append({\"name\":table.table_id,\"fields\":names})\n", " return r\n", " \n", "def get_fields(**args):\n", " \"\"\"\n", " This function will generate a random set of fields from two tables. Tables are structured as follows \n", " {name,fields:[],\"y\":}, with \n", " name table name (needed to generate sql query)\n", " fields list of field names, used in the projection\n", " y name of the field to be joined.\n", " @param xo candidate table in the join\n", " @param xi candidate table in the join\n", " @param join field by which the tables can be joined.\n", " \"\"\"\n", " # The set operation will remove redundancies in the field names (not sure it's a good idea)\n", " xo = args['xo']['fields']\n", " xi = args['xi']['fields']\n", " zi = args['xi']['name']\n", " return list(set(xo) | set(['.'.join([args['xi']['name'],name]) for name in xi if name != args['join']]) )\n", "def generate_sql(**args):\n", " \"\"\"\n", " This function will generate the SQL query for the resulting join\n", " \"\"\"\n", " xo = args['xo']\n", " xi = args['xi']\n", " sql = \"SELECT :fields FROM :xo.name INNER JOIN :xi.name ON :xi.name.:xi.y = :xo.y \"\n", " fields = \",\".join(get_fields(xo=xi,xi=xi,join=xi['y']))\n", " \n", " \n", " sql = sql.replace(\":fields\",fields).replace(\":xo.name\",xo['name']).replace(\":xi.name\",xi['name'])\n", " sql = sql.replace(\":xi.y\",xi['y']).replace(\":xo.y\",xo['y'])\n", " return sql\n", " \n", " " ] }, { "cell_type": "code", "execution_count": 54, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['person_id',\n", " 'measurements.value_as_number',\n", " 'date_of_birth',\n", " 'race',\n", " 'measurements.value_source_value']" ] }, "execution_count": 54, "metadata": {}, "output_type": "execute_result" } ], "source": [ "xo = {\"name\":\"person\",\"fields\":['person_id','date_of_birth','race']}\n", "xi = {\"name\":\"measurements\",\"fields\":['person_id','value_as_number','value_source_value']}\n", "get_fields(xo=xo,xi=xi,join=\"person_id\")" ] }, { "cell_type": "code", "execution_count": 55, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'SELECT person_id,value_as_number,measurements.value_source_value,measurements.value_as_number,value_source_value FROM person INNER JOIN measurements ON measurements.person_id = person_id '" ] }, "execution_count": 55, "metadata": {}, "output_type": "execute_result" } ], "source": [ "xo = {\"name\":\"person\",\"fields\":['person_id','date_of_birth','race'],\"y\":\"person_id\"}\n", "xi = {\"name\":\"measurements\",\"fields\":['person_id','value_as_number','value_source_value'],\"y\":\"person_id\"}\n", "generate_sql(xo=xo,xi=xi)" ] }, { "cell_type": "code", "execution_count": 59, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[('a', 'b'), ('a', 'c'), ('b', 'c')]" ] }, "execution_count": 59, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\"\"\"\n", " We are designing a process that will take two tables that will generate \n", "\"\"\"\n", "import itertools\n", "list(itertools.combinations(['a','b','c'],2))" ] }, { "cell_type": "code", "execution_count": 87, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "TableReference(DatasetReference(u'aou-res-deid-vumc-test', u'raw'), 'care_site')" ] }, "execution_count": 87, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ref = client.dataset('raw')\n", "tables = list(client.list_tables(ref))\n", "names = [table.table_id for table in tables]\n", "(tables[0].reference)" ] }, { "cell_type": "code", "execution_count": 85, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(u'care_site',\n", " u'concept',\n", " u'concept_ancestor',\n", " u'concept_class',\n", " u'concept_relationship',\n", " u'concept_synonym',\n", " u'condition_occurrence',\n", " u'criteria',\n", " u'death',\n", " u'device_exposure',\n", " u'domain',\n", " u'drug_exposure',\n", " u'drug_strength',\n", " u'location',\n", " u'measurement',\n", " u'note',\n", " u'observation',\n", " u'people_seed',\n", " u'person',\n", " u'procedure_occurrence',\n", " u'relationship',\n", " u'visit_occurrence',\n", " u'vocabulary')" ] }, "execution_count": 85, "metadata": {}, "output_type": "execute_result" } ], "source": [ "#\n", "# find every table with person id at the very least or a subset of fields\n", "#\n", "def get_tables\n", "q = ['person_id']\n", "pairs = list(itertools.combinations(names,len(names)))\n", "pairs[0]" ] }, { "cell_type": "code", "execution_count": 90, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['a']" ] }, "execution_count": 90, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(set(['a','b']) & set(['a']))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 2", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.15rc1" } }, "nbformat": 4, "nbformat_minor": 2 }