From 81bc5a3ba155067806cedd4ac1128ff3d1aaca27 Mon Sep 17 00:00:00 2001
From: Steve Nyemba
Date: Fri, 22 Dec 2023 14:02:32 -0600
Subject: [PATCH] bug fix: bigquery chunk/batch sizes

---
 transport/sql.py     | 7 ++++---
 transport/version.py | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/transport/sql.py b/transport/sql.py
index 019db78..7be3900 100644
--- a/transport/sql.py
+++ b/transport/sql.py
@@ -32,7 +32,7 @@ import nzpy as nz #--- netezza drivers
 import sqlite3
 import copy
 import os
-
+import time

 class SQLRW :
 	lock = RLock()
@@ -357,7 +357,7 @@ class SQLWriter(SQLRW,Writer):
 			#
 			# Writing with schema information ...
 			rows = _info.iloc[i].to_sql(self.table,self._engine,schema=self.schema,if_exists='append',index=False)
-
+			time.sleep(1)
 		else:
 			_fields = ",".join(self.fields)
 			_sql = _sql.replace(":fields",_fields)
@@ -495,10 +495,11 @@ class BQWriter(BigQuery,Writer):
 		# _df.to_gbq(**self.mode) #if_exists='append',destination_table=partial,credentials=credentials,chunksize=90000)
 		#
 		# Let us adjust the chunking here
-		self._chunkks = 10 if _df.shape[0] > SQLRW.MAX_CHUNK and self._chunks == 1 else self._chunks
+		self._chunks = 10 if _df.shape[0] > SQLRW.MAX_CHUNK and self._chunks == 1 else self._chunks
 		_indexes = np.array_split(np.arange(_df.shape[0]),self._chunks)
 		for i in _indexes :
 			_df.iloc[i].to_gbq(**self.mode)
+			time.sleep(1)
 		pass
 #
 # Aliasing the big query classes allowing it to be backward compatible
diff --git a/transport/version.py b/transport/version.py
index 3fa6e8d..5ad4744 100644
--- a/transport/version.py
+++ b/transport/version.py
@@ -1,2 +1,2 @@
 __author__ = 'The Phi Technology'
-__version__= '1.9.2'
+__version__= '1.9.3'