Compare commits

...

11 Commits
master ... main

Author SHA1 Message Date
Steve Nyemba d9dac42adc merge fix
2 weeks ago
Steve Nyemba 8421511446 version
2 weeks ago
Steve Nyemba 4c2efc2892 documentation ... readme
2 weeks ago
Steve Nyemba a31481e196 fix
4 weeks ago
Steve Nyemba 89d762f39a bug fixes: conditional imports
4 weeks ago
Steve Nyemba 6e753a1fcd bug fixes
4 weeks ago
Steve Nyemba 18c54d7664 bug fixes
4 weeks ago
Steve Nyemba f06d26f9b6 bug fixes:installer & imports
4 weeks ago
Steve Nyemba be10ae17d7 bug fixes: installer & registry
4 weeks ago
Steve Nyemba befdf453f5 bug fix: crash with etl & process
1 month ago
Steve L. Nyemba 4109c4c1aa Initial commit
1 month ago

@ -0,0 +1,9 @@
MIT License
Copyright (c) <year> <copyright holders>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@ -4,12 +4,11 @@ This project implements an abstraction of objects that can have access to a vari
# Why Use Data-Transport ?
This framework primarily serves data scientists who do not want to worry about the underlying database and would like a simple, consistent way to read, write, and move data. Additionally, we implemented a lightweight Extract-Transform-Load (ETL) API and command-line (CLI) tool. Finally, it is possible to attach pre/post-processing pipeline functions to reads and writes.
1. Familiarity with **pandas data-frames**
2. Connectivity **drivers** are included
3. Reading/Writing data from various sources
4. Useful for data migrations or **ETL**
Data transport is a simple framework that:
- easy to install & modify (open-source)
- enables access to multiple database technologies (pandas, SQLAlchemy)
- enables notebook sharing without exposing database credentials.
- supports pre/post processing specifications (pipeline)
## Installation
@ -18,19 +17,16 @@ Within the virtual environment perform the following :
pip install git+https://github.com/lnyemba/data-transport.git
## Features
Options to install components in square brackets
- read/write from over a dozen databases
- run ETL jobs seamlessly
- scales and integrates into shared environments like apache zeppelin; jupyterhub; SageMaker; ...
pip install data-transport[nosql,cloud,warehouse,all]@git+https://github.com/lnyemba/data-transport.git
## What's new
Unlike versions 2.0 and earlier, we now focus on collaborative environments such as Jupyter servers and Apache Zeppelin:
## Additional features
1. Simpler syntax to create reader or writer
2. auth-file registry that can be referenced using a label
3. duckdb support
- In addition to read/write, there is support for functions for pre/post processing
- CLI interface to add to registry, run ETL
- scales and integrates into shared environments like apache zeppelin; jupyterhub; SageMaker; ...
## Learn More

@ -53,9 +53,9 @@ def wait(jobs):
while jobs :
jobs = [thread for thread in jobs if thread.is_alive()]
time.sleep(1)
def wait (jobs):
while jobs :
jobs = [pthread for pthread in jobs if pthread.is_alive()]
# def wait (jobs):
# while jobs :
# jobs = [pthread for pthread in jobs if pthread.is_alive()]
@app_e.command(name="run")
def apply (path:Annotated[str,typer.Argument(help="path of the configuration file")],

@ -18,25 +18,17 @@ classifiers = [
]
dependencies = [
"termcolor","sqlalchemy", "aiosqlite","duckdb-engine",
"typer","pandas","numpy","sqlalchemy","pyarrow",
"mysql-connector-python","psycopg2-binary","nzpy","pymssql","duckdb-engine","aiosqlite",
"typer","pandas","numpy","sqlalchemy","pyarrow","smart-open",
"plugin-ix@git+https://github.com/lnyemba/plugins-ix"
]
[project.optional-dependencies]
sql = ["mysql-connector-python","psycopg2-binary","nzpy","pymssql","duckdb-engine","aiosqlite"]
#sql = ["mysql-connector-python","psycopg2-binary","nzpy","pymssql","duckdb-engine","aiosqlite"]
nosql = ["pymongo","cloudant"]
cloud = ["pandas-gbq","google-cloud-bigquery","google-cloud-bigquery-storage", "databricks-sqlalchemy","pyncclient","boto3","boto","botocore"]
cloud = ["boto","boto3","botocore","pyncclient","pandas-gbq","google-cloud-bigquery","google-cloud-bigquery-storage", "databricks-sqlalchemy"]
warehouse = ["pydrill","pyspark","sqlalchemy_drill"]
rabbitmq = ["pika"]
sqlite = ["aiosqlite"]
aws3 = ["boto3","boto","botocore"]
nextcloud = ["pyncclient"]
mongodb = ["pymongo"]
netezza = ["nzpy"]
mysql = ["mysql-connector-python"]
postgresql = ["psycopg2-binary"]
sqlserver = ["pymssql"]
http = ["flask-session"]
all = ["mysql-connector-python","psycopg2-binary","nzpy","pymssql","duckdb-engine","aiosqlite","pymongo","cloudant","pandas-gbq","google-cloud-bigquery","google-cloud-bigquery-storage", "databricks-sqlalchemy","pyncclient","boto3","boto","botocore","pydrill","pyspark","sqlalchemy_drill", "pika","aiosqlite","boto3","boto","botocore", "pyncclient"]
other = ["pika","flask-session"]
all = ["pymongo","cloudant","pandas-gbq","google-cloud-bigquery","google-cloud-bigquery-storage", "databricks-sqlalchemy","pyncclient","boto3","boto","botocore","pydrill","pyspark","sqlalchemy_drill", "pika","aiosqlite"]
[project.urls]
Homepage = "https://healthcareio.the-phi.com/git/code/transport.git"

@ -18,7 +18,27 @@ Source Code is available under MIT License:
"""
import numpy as np
from transport import sql, nosql, cloud, other, warehouse
#from transport import sql, nosql, cloud, other, warehouse
from transport import sql
try:
from transport import nosql
except Exception as e:
nosql = {}
try:
from transport import cloud
except Exception as e:
cloud = {}
try:
from transport import warehouse
except Exception as e:
warehouse = {}
try:
from transport import other
except Exception as e:
other = {}
import pandas as pd
import json
import os
@ -35,7 +55,7 @@ def init():
global PROVIDERS
for _module in [cloud,sql,nosql,other,warehouse] :
for _provider_name in dir(_module) :
if _provider_name.startswith('__') or _provider_name == 'common':
if _provider_name.startswith('__') or _provider_name == 'common' or type(_module) in [None,str,dict]:
continue
PROVIDERS[_provider_name] = {'module':getattr(_module,_provider_name),'type':_module.__name__}
#

@ -49,7 +49,8 @@ def init (email,path=REGISTRY_PATH,override=False,_file=REGISTRY_FILE):
Initializing the registry and will raise an exception in the advent of an issue
"""
p = '@' in email
q = False if '.' not in email else email.split('.')[-1] in ['edu','com','io','ai','org']
#q = False if '.' not in email else email.split('.')[-1] in ['edu','com','io','ai','org']
q = len(email.split('.')[-1]) in [2,3]
if p and q :
_config = {"email":email,'version':__version__}
if not os.path.exists(path):

Loading…
Cancel
Save