parent f4c0ca80aa
commit 3b5c9d1503
@@ -0,0 +1,63 @@
from transport.common import Reader, Writer, Console #, factory
from transport import disk
import sqlite3
from transport import s3 as s3
from transport import rabbitmq as queue
from transport import couch as couch
from transport import mongo as mongo
from transport import sql as sql
from transport import etl as etl
from transport import qlistener
import psycopg2 as pg
import mysql.connector as my
from google.cloud import bigquery as bq
import nzpy as nz #--- netezza drivers
import os

from transport.version import __version__

POSTGRESQL = 'postgresql'
MONGODB = 'mongodb'
HTTP = 'http'
BIGQUERY = 'bigquery'
FILE = 'file'
ETL = 'etl'
SQLITE = 'sqlite'
SQLITE3 = 'sqlite'
REDSHIFT = 'redshift'
NETEZZA = 'netezza'
MYSQL = 'mysql'
RABBITMQ = 'rabbitmq'
MARIADB = 'mariadb'
COUCHDB = 'couch'
CONSOLE = 'console'
ETL = 'etl'
#
# synonyms of the above
BQ = BIGQUERY
MONGO = MONGODB
FERRETDB = MONGODB
PG = POSTGRESQL
PSQL = POSTGRESQL
PGSQL = POSTGRESQL
S3 = 's3'
AWS_S3 = 's3'
RABBIT = RABBITMQ

QLISTENER = 'qlistener'

DRIVERS = {PG:pg,REDSHIFT:pg,MYSQL:my,MARIADB:my,NETEZZA:nz,SQLITE:sqlite3}
CATEGORIES = {'sql':[NETEZZA,PG,MYSQL,REDSHIFT,SQLITE,MARIADB],'nosql':[MONGODB,COUCHDB],'cloud':[BIGQUERY],'file':[FILE],
              'queue':[RABBIT,QLISTENER],'memory':[CONSOLE,QLISTENER],'http':[HTTP]}

READ = {'sql':sql.SQLReader,'nosql':{MONGODB:mongo.MongoReader,COUCHDB:couch.CouchReader},'cloud':sql.BigQueryReader,
        'file':disk.DiskReader,'queue':{RABBIT:queue.QueueReader,QLISTENER:qlistener.qListener}
        }
WRITE = {'sql':sql.SQLWriter,'nosql':{MONGODB:mongo.MongoWriter,COUCHDB:couch.CouchWriter},'cloud':sql.BigQueryWriter,
         'file':disk.DiskWriter,'queue':{RABBIT:queue.QueueWriter,QLISTENER:qlistener.qListener}
         }
DEFAULT = {PG:{'host':'localhost','port':5432},MYSQL:{'host':'localhost','port':3306}}
DEFAULT[MONGODB] = {'port':27017,'host':'localhost'}
DEFAULT[REDSHIFT] = DEFAULT[PG]
DEFAULT[MARIADB] = DEFAULT[MYSQL]
DEFAULT[NETEZZA] = {'port':5480}
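Below is an illustrative sketch, not part of this commit, of one way the CATEGORIES and READ maps above could be combined to resolve a reader class for a provider name; the helper name resolve_reader is hypothetical.

def resolve_reader(provider):
    # walk the category index and pick the matching READ entry
    for _category, _providers in CATEGORIES.items():
        if provider not in _providers:
            continue
        _entry = READ.get(_category)
        # 'nosql' and 'queue' map to per-provider dictionaries,
        # the other categories map straight to a reader class
        return _entry.get(provider) if isinstance(_entry, dict) else _entry
    return None

# e.g. resolve_reader(POSTGRESQL) -> sql.SQLReader
#      resolve_reader(MONGODB)    -> mongo.MongoReader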
@@ -0,0 +1,42 @@
import queue
from threading import Thread, Lock
from transport.common import Reader, Writer
import numpy as np
import pandas as pd

class qListener :
    lock = Lock()
    _queue = {'default':queue.Queue()}
    def __init__(self,**_args):
        self._cache = {}
        self._callback = _args['callback'] if 'callback' in _args else None
        self._id = _args['id'] if 'id' in _args else 'default'
        if self._id not in qListener._queue :
            qListener._queue[self._id] = queue.Queue()
        thread = Thread(target=self._forward)
        thread.start()
    def _forward(self):
        _q = qListener._queue[self._id]
        _data = _q.get()
        _q.task_done()
        self._callback(_data)

    def has(self,**_args) :
        return self._callback is not None


    def close(self):
        """
        This will empty the queue and have it ready for another operation
        """
        _q = qListener._queue[self._id]
        with _q.mutex:
            _q.queue.clear()
            _q.all_tasks_done.notify_all()

    def write(self,_data,**_args):
        _id = _args['id'] if 'id' in _args else self._id

        _q = qListener._queue[_id]
        _q.put(_data)
        _q.join()
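The sketch below, not part of this commit, shows one way the qListener class above could be exercised; it assumes the module is importable as transport.qlistener, as the first file's import suggests, and that each instance's forwarding thread hands a single queued item to the callback.

from transport import qlistener

def on_message(_data):
    # invoked by the instance's forwarding thread once an item is dequeued
    print('received', _data)

_listener = qlistener.qListener(id='demo', callback=on_message)
_listener.write({'status':'ok'})   # enqueue the item and block until it is consumed
_listener.close()                  # clear the queue so the id can be reused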