Compare commits

..

20 Commits
main ... master

Author SHA1 Message Date
Steve L. Nyemba 96e200cf23
Merge pull request #27 from lnyemba/v2.2.0
1 month ago
Steve L. Nyemba a7b7c0591a
Merge pull request #26 from lnyemba/v2.2.0
1 month ago
Steve L. Nyemba a0d42514bd
Merge pull request #25 from lnyemba/v2.2.0
1 month ago
Steve L. Nyemba 2f97fa24e0
Merge pull request #24 from lnyemba/v2.2.0
1 month ago
Steve L. Nyemba 06d53a7739
Merge pull request #23 from lnyemba/v2.2.0
1 month ago
Steve L. Nyemba 16dc2ca8b1
Merge pull request #22 from lnyemba/v2.2.0
2 months ago
Steve L. Nyemba d6f96ac1b2
Merge pull request #21 from lnyemba/v2.2.0
4 months ago
Steve L. Nyemba aa926f77a3
Merge pull request #20 from lnyemba/v2.2.0
11 months ago
Steve L. Nyemba 4d9dcc776c
Merge pull request #19 from lnyemba/v2.2.0
11 months ago
Steve L. Nyemba 53e4ab39dc
Merge pull request #18 from lnyemba/v2.2.0
11 months ago
Steve L. Nyemba 100085bd1b
Merge pull request #17 from lnyemba/v2.2.0
1 year ago
Steve L. Nyemba b30dc0f023
Merge pull request #16 from lnyemba/v2.2.0
1 year ago
Steve L. Nyemba 29f135ecc4
Merge pull request #15 from lnyemba/v2.2.0
1 year ago
Steve L. Nyemba f9aba5db1c
Merge pull request #14 from lnyemba/v2.0.4
1 year ago
Steve L. Nyemba 76484f0130
Merge pull request #13 from lnyemba/v2.0.4
1 year ago
Steve L. Nyemba a4481865ea
Merge pull request #12 from lnyemba/v2.0.4
1 year ago
Steve L. Nyemba 01fd94d7b1
Merge pull request #11 from lnyemba/v2.0.4
1 year ago
Steve L. Nyemba 30e0297859
Merge pull request #10 from lnyemba/v2.0
1 year ago
Steve L. Nyemba 874c380e4d
Merge pull request #9 from lnyemba/dev
1 year ago
Steve L. Nyemba 061806fded
Merge pull request #8 from lnyemba/dev
1 year ago

@@ -1,9 +0,0 @@
MIT License
Copyright (c) <year> <copyright holders>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@@ -4,11 +4,12 @@ This project implements an abstraction of objects that can have access to a vari
# Why Use Data-Transport ?
Data transport is a simple framework that: Mostly data scientists that don't really care about the underlying database and would like a simple and consistent way to read/write and move data are well served. Additionally we implemented lightweight Extract Transform Loading API and command line (CLI) tool. Finally it is possible to add pre/post processing pipeline functions to read/write
- easy to install & modify (open-source)
- enables access to multiple database technologies (pandas, SQLAlchemy) 1. Familiarity with **pandas data-frames**
- enables notebook sharing without exposing database credential. 2. Connectivity **drivers** are included
- supports pre/post processing specifications (pipeline) 3. Reading/Writing data from various sources
4. Useful for data migrations or **ETL**
## Installation
@@ -17,16 +18,19 @@ Within the virtual environment perform the following :
pip install git+https://github.com/lnyemba/data-transport.git
Options to install components in square brackets ## Features
pip install data-transport[nosql,cloud,warehouse,all]@git+https://github.com/lnyemba/data-transport.git - read/write from over a dozen databases
- run ETL jobs seamlessly
- scales and integrates into shared environments like apache zeppelin; jupyterhub; SageMaker; ...
## What's new
## Additional features Unlike older versions 2.0 and under, we focus on collaborative environments like jupyter-x servers; apache zeppelin:
- In addition to read/write, there is support for functions for pre/post processing 1. Simpler syntax to create reader or writer
- CLI interface to add to registry, run ETL 2. auth-file registry that can be referenced using a label
- scales and integrates into shared environments like apache zeppelin; jupyterhub; SageMaker; ... 3. duckdb support
## Learn More

@@ -53,9 +53,9 @@ def wait(jobs):
while jobs :
jobs = [thread for thread in jobs if thread.is_alive()]
time.sleep(1)
# def wait (jobs): def wait (jobs):
# while jobs : while jobs :
# jobs = [pthread for pthread in jobs if pthread.is_alive()] jobs = [pthread for pthread in jobs if pthread.is_alive()]
@app_e.command(name="run")
def apply (path:Annotated[str,typer.Argument(help="path of the configuration file")],

@@ -18,17 +18,25 @@ classifiers = [
]
dependencies = [
"termcolor","sqlalchemy", "aiosqlite","duckdb-engine",
"mysql-connector-python","psycopg2-binary","nzpy","pymssql","duckdb-engine","aiosqlite", "typer","pandas","numpy","sqlalchemy","pyarrow",
"typer","pandas","numpy","sqlalchemy","pyarrow","smart-open",
"plugin-ix@git+https://github.com/lnyemba/plugins-ix" "plugin-ix@git+https://github.com/lnyemba/plugins-ix"
]
[project.optional-dependencies]
#sql = ["mysql-connector-python","psycopg2-binary","nzpy","pymssql","duckdb-engine","aiosqlite"] sql = ["mysql-connector-python","psycopg2-binary","nzpy","pymssql","duckdb-engine","aiosqlite"]
nosql = ["pymongo","cloudant"]
cloud = ["boto","boto3","botocore","pyncclient","pandas-gbq","google-cloud-bigquery","google-cloud-bigquery-storage", "databricks-sqlalchemy","pyncclient","boto3","boto","botocore"] cloud = ["pandas-gbq","google-cloud-bigquery","google-cloud-bigquery-storage", "databricks-sqlalchemy","pyncclient","boto3","boto","botocore"]
warehouse = ["pydrill","pyspark","sqlalchemy_drill"]
other = ["pika","flask-session"] rabbitmq = ["pika"]
all = ["pymongo","cloudant","pandas-gbq","google-cloud-bigquery","google-cloud-bigquery-storage", "databricks-sqlalchemy","pyncclient","boto3","boto","botocore","pydrill","pyspark","sqlalchemy_drill", "pika","aiosqlite","boto3","boto","botocore", "pyncclient"] sqlite = ["aiosqlite"]
aws3 = ["boto3","boto","botocore"]
nextcloud = ["pyncclient"]
mongodb = ["pymongo"]
netezza = ["nzpy"]
mysql = ["mysql-connector-python"]
postgresql = ["psycopg2-binary"]
sqlserver = ["pymssql"]
http = ["flask-session"]
all = ["mysql-connector-python","psycopg2-binary","nzpy","pymssql","duckdb-engine","aiosqlite","pymongo","cloudant","pandas-gbq","google-cloud-bigquery","google-cloud-bigquery-storage", "databricks-sqlalchemy","pyncclient","boto3","boto","botocore","pydrill","pyspark","sqlalchemy_drill", "pika","aiosqlite","boto3","boto","botocore", "pyncclient"]
[project.urls]
Homepage = "https://healthcareio.the-phi.com/git/code/transport.git"

@@ -18,27 +18,7 @@ Source Code is available under MIT License:
"""
import numpy as np
#from transport import sql, nosql, cloud, other, warehouse from transport import sql, nosql, cloud, other, warehouse
from transport import sql
try:
from transport import nosql
except Exception as e:
nosql = {}
try:
from transport import cloud
except Exception as e:
cloud = {}
try:
from transport import warehouse
except Exception as e:
warehouse = {}
try:
from transport import other
except Exception as e:
other = {}
import pandas as pd
import json
import os
@@ -55,7 +35,7 @@ def init():
global PROVIDERS
for _module in [cloud,sql,nosql,other,warehouse] :
for _provider_name in dir(_module) :
if _provider_name.startswith('__') or _provider_name == 'common' or type(_module) in [None,str,dict]: if _provider_name.startswith('__') or _provider_name == 'common':
continue
PROVIDERS[_provider_name] = {'module':getattr(_module,_provider_name),'type':_module.__name__}
# #

@@ -49,8 +49,7 @@ def init (email,path=REGISTRY_PATH,override=False,_file=REGISTRY_FILE):
Initializing the registry and will raise an exception in the advent of an issue
"""
p = '@' in email p = '@' in email
#q = False if '.' not in email else email.split('.')[-1] in ['edu','com','io','ai','org'] q = False if '.' not in email else email.split('.')[-1] in ['edu','com','io','ai','org']
q = len(email.split('.')[-1]) in [2,3]
if p and q :
_config = {"email":email,'version':__version__}
if not os.path.exists(path):

Loading…
Cancel
Save