Merge pull request #3 from DolphDev/develop
Push Fixes and Changes to master
Showing 15 changed files with 482 additions and 311 deletions.
@@ -1,117 +1 @@
from .core.objects.apiobjects import MainSelection
from .core.objects import BaseRow, banned_columns
from .core.exceptions.messages import LoadingMsg as msg


import csv
import io
import glob
import itertools

def csv_size_limit(size):
    """Changes the csv field size limit.
    :param size: The size limit of the csv data.
    :type size: :class:`int`
    """
    csv.field_size_limit(size)

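A quick hedged sketch of how this wrapper might be called; the limit value and the psv import name are illustrative assumptions, not something the diff itself shows:

import psv  # assuming the package is importable as psv

# Raise the csv module's per-field size limit before loading data with very
# large fields; the 10 MB figure is an arbitrary example value.
psv.csv_size_limit(10 * 1024 * 1024)
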
def load(f, cls=BaseRow, outputfile=None, delimiter=",", quotechar='"', mode='r', buffering=-1,
         encoding="utf-8", errors=None, newline=None, closefd=True, opener=None, typetransfer=True,
         csv_size_max=None, csv_max_row=None):
    """Loads a file into psv
    :param cls: The class that will be used for csv data.
    :type cls: :class:`BaseRow` (or class that inherits it)
    """
    if csv_size_max:
        csv_size_limit(csv_size_max)

    if not csv_max_row:
        # Accept either an already-open file object or a path to open.
        with f if isinstance(f, io.IOBase) else open(f, mode=mode, buffering=buffering,
                encoding=encoding, errors=errors, newline=newline, closefd=closefd, opener=opener) as csvfile:
            data = csv.DictReader(
                csvfile, delimiter=delimiter, quotechar=quotechar)
            api = MainSelection(data, columns=column_names(csvfile.name, cls, quotechar, delimiter,
                                mode, buffering, encoding, errors, newline, closefd, opener),
                                outputfiled=outputfile, cls=cls, typetransfer=typetransfer)
    else:
        # Same as above, but cap the number of rows read with islice.
        with f if isinstance(f, io.IOBase) else open(f, mode=mode, buffering=buffering,
                encoding=encoding, errors=errors, newline=newline, closefd=closefd, opener=opener) as csvfile:
            data = itertools.islice(csv.DictReader(
                csvfile, delimiter=delimiter, quotechar=quotechar), csv_max_row)
            api = MainSelection(data, columns=column_names(csvfile.name, cls, quotechar, delimiter,
                                mode, buffering, encoding, errors, newline, closefd, opener),
                                outputfiled=outputfile, cls=cls, typetransfer=typetransfer)
    return api

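A hedged usage sketch for load(); the file name is hypothetical and only parameters visible in the signature above are used:

import psv  # assuming the package is importable as psv

# Load a whole CSV file into a MainSelection.
api = psv.load("people.csv")

# Or cap the number of parsed rows via csv_max_row (itertools.islice internally).
preview = psv.load("people.csv", csv_max_row=100)

# An already-open file object is also accepted; load() closes it when finished.
fh = open("people.csv", encoding="utf-8")
api = psv.load(fh)
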
def loaddir(f, cls=BaseRow, outputfile=None, delimiter=",", quotechar='"', mode='r', buffering=-1,
            encoding="utf-8", errors=None, newline=None, closefd=True, opener=None, typetransfer=True,
            csv_size_max=None, filetype="*.csv"):
    """Loads a directory of .csv files"""
    if csv_size_max:
        csv_size_limit(csv_size_max)
    data = []
    columns = None
    for files in glob.glob(f+filetype):
        if not columns:
            columns = column_names(files)
        with open(files, mode=mode, buffering=buffering,
                  encoding=encoding, errors=errors, newline=newline, closefd=closefd, opener=opener) as csvfile:
            data = data + list(csv.DictReader(csvfile,
                               delimiter=delimiter, quotechar=quotechar))
    forbidden_columns(columns)
    return MainSelection(data, columns=columns, outputfiled=outputfile, cls=cls, typetransfer=typetransfer)

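A usage sketch for loaddir(); note from the glob call above that the directory argument is concatenated directly with the filetype pattern, so a trailing separator matters (the path is illustrative):

import psv  # assuming the package is importable as psv

# "data/" + "*.csv" becomes the glob pattern "data/*.csv"; all matching files
# are merged into a single MainSelection, using the first file's header.
api = psv.loaddir("data/")
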
def loads(csvdoc, columns=None, cls=BaseRow, outputfile=None, delimiter=",", quotechar='"',
          typetransfer=True, csv_size_max=None, newline="\n"):
    was_str = False
    if csv_size_max:
        csv_size_limit(csv_size_max)
    if isinstance(csvdoc, str):
        was_str = True
        data = csv.DictReader(csvdoc.split(newline),
                              delimiter=delimiter, quotechar=quotechar)
        if not columns:
            columns = tuple(next(csv.reader(csvdoc.split(
                newline), delimiter=delimiter, quotechar=quotechar)))
    else:
        data = csvdoc
    if columns:
        forbidden_columns(columns)
    elif (not columns) and isinstance(csvdoc, dict):
        forbidden_columns(csvdoc.keys())
    api = MainSelection(data, columns=(
        columns), outputfiled=outputfile, cls=cls, typetransfer=typetransfer)
    return api

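A sketch for loads(): the string is split on the newline argument ("\n" by default) before being handed to csv.DictReader, and the header row becomes the column tuple when no columns are passed. The document below is illustrative:

import psv  # assuming the package is importable as psv

doc = "name,age\nRuby,30\nSam,25"
api = psv.loads(doc)  # columns inferred from the header row
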
def new(cls=BaseRow, columns=None, outputfile=None,
        csv_size_max=None):
    if csv_size_max:
        csv_size_limit(csv_size_max)
    if columns:
        forbidden_columns(columns)
    return MainSelection(columns=columns, outputfiled=outputfile, cls=cls)

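A sketch for new(); the column names and output path are illustrative:

import psv  # assuming the package is importable as psv

# Start an empty selection with a fixed set of columns.
api = psv.new(columns=("name", "age"), outputfile="out.csv")
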
def column_names(f, cls=BaseRow, quotechar='"', delimiter=",", mode='r', buffering=-1, encoding="utf-8",
                 errors=None, newline=None, closefd=True, opener=None,
                 csv_size_max=None, check_columns=True):
    if csv_size_max:
        csv_size_limit(csv_size_max)
    with open(f, mode=mode, buffering=buffering,
              encoding=encoding, errors=errors, newline=newline, closefd=closefd, opener=opener) as csvfile:
        # Read the header row using the caller-supplied delimiter and quotechar.
        columns = next(csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar))
    if check_columns:
        forbidden_columns(columns)
    return tuple(columns)

def forbidden_columns(columns):
    for x in columns:
        if x in banned_columns:
            raise ValueError(msg.forbidden_column(x))

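An illustrative sketch of the two helpers above; the file name is hypothetical, and banned_columns comes from the package's objects module:

import psv  # assuming the package is importable as psv

# Read only the header row of a file and validate it; a ValueError is raised
# if any header appears in banned_columns.
cols = psv.column_names("people.csv")
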
from .main import load, loads, loaddir, new, csv_size_limit, column_names
@@ -1,21 +1,3 @@
from . import messages


class PSV_BASE_EXCEPTION(Exception):
    pass


class RowError(PSV_BASE_EXCEPTION):
    pass


class FlagError(RowError):
    pass


class SelectionError(PSV_BASE_EXCEPTION):
    pass


class ApiError(SelectionError):
    pass
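For reference, a small hypothetical sketch of how the hierarchy lets callers catch every psv-specific error at once:

# All of the library's exceptions derive from PSV_BASE_EXCEPTION, so a single
# except clause covers RowError, FlagError, SelectionError and ApiError.
try:
    raise ApiError("hypothetical failure")
except PSV_BASE_EXCEPTION as exc:
    print("psv reported:", exc)
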
@@ -1,32 +1 @@
import csv
import io
from types import FunctionType


def outputfile(fileloc, rows, columnnames, quote_all=True, encoding="utf-8"):
    if not (isinstance(columnnames, list) or isinstance(columnnames, tuple)):
        raise ValueError(
            "Provided columns must be a list or tuple, was {}".format(type(columnnames)))
    with open(fileloc, 'w', encoding=encoding, newline='') as csvfile:
        fieldnames = columnnames
        writer = csv.DictWriter(
            csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL if quote_all else 0)
        writer.writeheader()
        for x in rows:
            if x.outputrow:
                writer.writerow(x.longcolumn(columnnames))


def outputstr(rows, columnnames, quote_all, encoding="utf-8"):
    if not (isinstance(columnnames, list) or isinstance(columnnames, tuple)):
        raise ValueError(
            "Provided columns must be a list or tuple, was {}".format(type(columnnames)))
    with io.StringIO() as csvfile:
        fieldnames = columnnames
        writer = csv.DictWriter(
            csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL if quote_all else 0)
        writer.writeheader()
        for x in rows:
            if x.outputrow:
                writer.writerow(x.longcolumn(columnnames))
        return csvfile.getvalue()
from .main import outputfile, outputstr
@@ -0,0 +1,32 @@
import csv
import io
from types import FunctionType


def outputfile(fileloc, rows, columnnames, quote_all=True, encoding="utf-8"):
    if not (isinstance(columnnames, list) or isinstance(columnnames, tuple)):
        raise ValueError(
            "Provided columns must be a list or tuple, was {}".format(type(columnnames)))
    with open(fileloc, 'w', encoding=encoding, newline='') as csvfile:
        fieldnames = columnnames
        writer = csv.DictWriter(
            csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL if quote_all else 0)
        writer.writeheader()
        for x in rows:
            if x.outputrow:
                writer.writerow(x.longcolumn(columnnames))


def outputstr(rows, columnnames, quote_all, encoding="utf-8"):
    if not (isinstance(columnnames, list) or isinstance(columnnames, tuple)):
        raise ValueError(
            "Provided columns must be a list or tuple, was {}".format(type(columnnames)))
    with io.StringIO() as csvfile:
        fieldnames = columnnames
        writer = csv.DictWriter(
            csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL if quote_all else 0)
        writer.writeheader()
        for x in rows:
            if x.outputrow:
                writer.writerow(x.longcolumn(columnnames))
        return csvfile.getvalue()
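A hedged sketch of how these writers might be driven; the stand-in row class below is hypothetical and only mimics the two members the loops rely on (outputrow and longcolumn()), which real psv rows provide:

class FakeRow:
    # Hypothetical stand-in for a psv row object.
    outputrow = True  # rows flagged False are skipped by the writers

    def __init__(self, values):
        self.values = values

    def longcolumn(self, columnnames):
        # Return a dict keyed by the requested column names.
        return {c: self.values.get(c, "") for c in columnnames}


rows = [FakeRow({"name": "Ruby", "age": 30})]
outputfile("out.csv", rows, ("name", "age"))              # write a CSV file
text = outputstr(rows, ("name", "age"), quote_all=True)   # or build a CSV string
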
@@ -1,21 +1 @@
"""This file contains all csv/excel parsing code""" | ||
|
||
from ..utils import translate_type | ||
from ..objects import cleanup_name | ||
|
||
|
||
def parser(csvfile, cls, columns_map, typetranfer=True, *args, **kwargs): | ||
"""This generates row objects for csv, and sets them up | ||
for dynamic access""" | ||
for row in csvfile: | ||
if typetranfer: | ||
yield cls({(x): translate_type(row[x]) | ||
for x in row.keys()}, columns_map, *args, **kwargs) | ||
else: | ||
yield cls(row, columns_map, *args, **kwargs) | ||
|
||
|
||
def parser_addrow(columns, cls, columns_map, typetranfer=True, *args, **kwargs): | ||
r = cls({}, columns_map, *args, **kwargs) | ||
r.update(({(x): "" for x in columns})) | ||
return r | ||
from .main import parser, parser_addrow
@@ -0,0 +1,21 @@
"""This file contains all csv/excel parsing code""" | ||
|
||
from ..utils import translate_type | ||
from ..objects import cleanup_name | ||
|
||
|
||
def parser(csvfile, cls, columns_map, typetranfer=True, *args, **kwargs): | ||
"""This generates row objects for csv, and sets them up | ||
for dynamic access""" | ||
for row in csvfile: | ||
if typetranfer: | ||
yield cls({(x): translate_type(row[x]) | ||
for x in row.keys()}, columns_map, *args, **kwargs) | ||
else: | ||
yield cls(row, columns_map, *args, **kwargs) | ||
|
||
|
||
def parser_addrow(columns, cls, columns_map, typetranfer=True, *args, **kwargs): | ||
r = cls({}, columns_map, *args, **kwargs) | ||
r.update(({(x): "" for x in columns})) | ||
return r |
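A hypothetical sketch of feeding csv.DictReader rows through parser(); the shape of the row class and of columns_map are assumptions made only for illustration:

import csv

class PlainRow(dict):
    # Hypothetical minimal row type accepting the (data, columns_map) call signature.
    def __init__(self, data, columns_map, *args, **kwargs):
        super().__init__(data)

with open("people.csv", newline="") as fh:
    reader = csv.DictReader(fh)
    rows = list(parser(reader, PlainRow, columns_map={}, typetranfer=False))
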