{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "dev-deid-600@aou-res-deid-vumc-test.iam.gserviceaccount.com df0ac049-d5b6-416f-ab3c-6321eda919d6 2018-09-25 08:18:34.829000+00:00 DONE\n" ] } ], "source": [ "import pandas as pd\n", "import numpy as np\n", "from google.cloud import bigquery as bq\n", "\n", "client = bq.Client.from_service_account_json('/home/steve/dev/google-cloud-sdk/accounts/vumc-test.json')\n", "# pd.read_gbq(query=\"select * from raw.observation limit 10\",private_key='/home/steve/dev/google-cloud-sdk/accounts/vumc-test.json')\n", "jobs = client.list_jobs()\n", "for job in jobs :\n", "# print dir(job)\n", " print job.user_email,job.job_id,job.started, job.state\n", " break" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [], "source": [ "xo = ['person_id','date_of_birth','race']\n", "xi = ['person_id','value_as_number','value_source_value']" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "def get_tables(client,id,fields=[]):\n", " \"\"\"\n", " getting table lists from google\n", " \"\"\"\n", " r = []\n", " ref = client.dataset(id)\n", " tables = list(client.list_tables(ref))\n", " for table in tables :\n", " ref = table.reference\n", " schema = client.get_table(ref).schema\n", " names = [f.name for f in schema]\n", " x = list(set(names) & set(fields))\n", " if x :\n", " r.append({\"name\":table.table_id,\"fields\":names})\n", " return r\n", " \n", "def get_fields(**args):\n", " \"\"\"\n", " This function will generate a random set of fields from two tables. Tables are structured as follows \n", " {name,fields:[],\"y\":}, with \n", " name table name (needed to generate sql query)\n", " fields list of field names, used in the projection\n", " y name of the field to be joined.\n", " @param xo candidate table in the join\n", " @param xi candidate table in the join\n", " @param join field by which the tables can be joined.\n", " \"\"\"\n", " # The set operation will remove redundancies in the field names (not sure it's a good idea)\n", "# xo = args['xo']['fields']\n", "# xi = args['xi']['fields']\n", "# zi = args['xi']['name']\n", "# return list(set([ \".\".join([args['xo']['name'],name]) for name in xo]) | set(['.'.join([args['xi']['name'],name]) for name in xi if name != args['join']]) )\n", " xo = args['xo']\n", " fields = [\".\".join([args['xo']['name'],name]) for name in args['xo']['fields']]\n", " if not isinstance(args['xi'],list) :\n", " x_ = [args['xi']]\n", " else:\n", " x_ = args['xi']\n", " for xi in x_ :\n", " fields += (['.'.join([xi['name'], name]) for name in xi['fields'] if name != args['join']])\n", " return fields\n", "def generate_sql(**args):\n", " \"\"\"\n", " This function will generate the SQL query for the resulting join\n", " \"\"\"\n", " \n", " xo = args['xo']\n", " x_ = args['xi']\n", " xo_name = \".\".join([args['prefix'],xo['name'] ]) if 'prefix' in args else xo['name']\n", " SQL = \"SELECT :fields FROM :xo.name \".replace(\":xo.name\",xo_name)\n", " if not isinstance(x_,list):\n", " x_ = [x_]\n", " f = []#[\".\".join([args['xo']['name'],args['join']] )] \n", " INNER_JOINS = []\n", " for xi in x_ :\n", " xi_name = \".\".join([args['prefix'],xi['name'] ]) if 'prefix' in args else xi['name']\n", " JOIN_SQL = \"INNER JOIN :xi.name ON \".replace(':xi.name',xi_name)\n", " value = \".\".join([xi['name'],args['join']])\n", " f.append(value) \n", " \n", " ON_SQL = \"\"\n", " tmp = 
[]\n", " for term in f :\n", " ON_SQL = \":xi.name.:ofield = :xo.name.:ofield\".replace(\":xo.name\",xo['name'])\n", " ON_SQL = ON_SQL.replace(\":xi.name.:ofield\",term).replace(\":ofield\",args['join'])\n", " tmp.append(ON_SQL)\n", " INNER_JOINS += [JOIN_SQL + \" AND \".join(tmp)]\n", " return SQL + \" \".join(INNER_JOINS)\n", "def get_final_sql(**args):\n", " xo = args['xo']\n", " xi = args['xi']\n", " join=args['join']\n", " prefix = args['prefix'] if 'prefix' in args else ''\n", " fields = get_fields (xo=xo,xi=xi,join=join)\n", " k = len(fields)\n", " n = np.random.randint(2,k) #-- number of fields to select\n", " i = np.random.randint(0,k,size=n)\n", " fields = [name for name in fields if fields.index(name) in i]\n", " base_sql = generate_sql(xo=xo,xi=xi,prefix)\n", " SQL = \"\"\"\n", " SELECT AVERAGE(count),size,n as selected_features,k as total_features\n", " FROM(\n", " SELECT COUNT(*) as count,count(:join) as pop,sum(:n) as N,sum(:k) as k,:fields\n", " FROM (:sql)\n", " GROUP BY :fields\n", " ) \n", " order by 1\n", " \n", " \"\"\".replace(\":sql\",base_sql)\n", "# sql = \"SELECT :fields FROM :xo.name INNER JOIN :xi.name ON :xi.name.:xi.y = :xo.y \"\n", "# fields = \",\".join(get_fields(xo=xi,xi=xi,join=xi['y']))\n", " \n", " \n", "# sql = sql.replace(\":fields\",fields).replace(\":xo.name\",xo['name']).replace(\":xi.name\",xi['name'])\n", "# sql = sql.replace(\":xi.y\",xi['y']).replace(\":xo.y\",xo['y'])\n", "# return sql\n", " \n", " " ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [], "source": [ "xo = {\"name\":\"person\",\"fields\":['person_id','date_of_birth','race','value_as_number']}\n", "xi = [{\"name\":\"measurement\",\"fields\":['person_id','value_as_number','value_source_value']}] #,{\"name\":\"observation\",\"fields\":[\"person_id\",\"value_as_string\",\"observation_source_value\"]}]\n", "# generate_sql(xo=xo,xi=xi,join=\"person_id\",prefix='raw')\n", "fields = get_fields(xo=xo,xi=xi,join='person_id')\n", "ofields = list(fields)\n", "k = len(fields)\n", "n = np.random.randint(2,k) #-- number of fields to select\n", "i = np.random.randint(0,k,size=n)\n", "fields = [name for name in fields if fields.index(name) in i]" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['person.race', 'person.value_as_number', 'measurement.value_source_value']" ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" } ], "source": [ "fields\n" ] }, { "cell_type": "code", "execution_count": 55, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'SELECT person_id,value_as_number,measurements.value_source_value,measurements.value_as_number,value_source_value FROM person INNER JOIN measurements ON measurements.person_id = person_id '" ] }, "execution_count": 55, "metadata": {}, "output_type": "execute_result" } ], "source": [ "xo = {\"name\":\"person\",\"fields\":['person_id','date_of_birth','race'],\"y\":\"person_id\"}\n", "xi = {\"name\":\"measurements\",\"fields\":['person_id','value_as_number','value_source_value'],\"y\":\"person_id\"}\n", "generate_sql(xo=xo,xi=xi)" ] }, { "cell_type": "code", "execution_count": 59, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[('a', 'b'), ('a', 'c'), ('b', 'c')]" ] }, "execution_count": 59, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\"\"\"\n", " We are designing a process that will take two tables that will generate \n", "\"\"\"\n", "import itertools\n", "list(itertools.combinations(['a','b','c'],2))" ] 
}, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([1, 3, 0, 0])" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "#\n", "# find every table with person id at the very least or a subset of fields\n", "#\n", "np.random.randint(0,4,size=4)" ] }, { "cell_type": "code", "execution_count": 90, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['a']" ] }, "execution_count": 90, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(set(['a','b']) & set(['a']))" ] }, { "cell_type": "code", "execution_count": 120, "metadata": {}, "outputs": [], "source": [ "x_ = 1" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "x_ = pd.DataFrame({\"group\":[1,1,1,1,1], \"size\":[2,1,1,1,1]})" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
size
group
11.2
\n", "
" ], "text/plain": [ " size\n", "group \n", "1 1.2" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "x_.groupby(['group']).mean()\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 2", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.15rc1" } }, "nbformat": 4, "nbformat_minor": 2 }