Skip to content

Commit

Permalink
Merge pull request #3 from DolphDev/develop
Browse files Browse the repository at this point in the history
Push Fixes and Changes to master
  • Loading branch information
DolphDev authored Jan 24, 2018
2 parents 93da674 + 7ea77db commit 27b803d
Show file tree
Hide file tree
Showing 15 changed files with 482 additions and 311 deletions.
118 changes: 1 addition & 117 deletions psv/__init__.py
Original file line number Diff line number Diff line change
@@ -1,117 +1 @@
from .core.objects.apiobjects import MainSelection
from .core.objects import BaseRow, banned_columns
from .core.exceptions.messages import LoadingMsg as msg


import csv
import io
import glob
import itertools


def csv_size_limit(size):
    """Change the csv module's field size limit.

    Note: this is process-wide state of the :mod:`csv` module, not
    per-file.

    :param size: The new field size limit of the csv data, in characters.
    :type size: :class:`int`
    :returns: The previous field size limit (useful for restoring it).
    :rtype: :class:`int`
    """
    # csv.field_size_limit returns the old limit when given a new one;
    # forwarding it makes the change reversible for callers.
    return csv.field_size_limit(size)


def load(f, cls=BaseRow, outputfile=None, delimiter=",", quotechar='"', mode='r', buffering=-1,
         encoding="utf-8", errors=None, newline=None, closefd=True, opener=None, typetransfer=True,
         csv_size_max=None, csv_max_row=None):
    """Loads a csv file into psv.

    :param f: Path to a csv file, or an already-open file object.
    :param cls: The class that will be used for csv data.
    :type cls: :class:`BaseRow` (or class that inherits it)
    :param outputfile: Default output destination stored on the selection.
    :param csv_size_max: If given, raises the csv module's field size limit.
    :param csv_max_row: If given, at most this many data rows are read.
    :returns: A :class:`MainSelection` wrapping the parsed rows.
    """
    if csv_size_max:
        csv_size_limit(csv_size_max)
    # io.IOBase is the public ABC of open file objects; the original code
    # reached into the private ``io._io`` accelerator module to get it.
    with f if isinstance(f, io.IOBase) else open(
            f, mode=mode, buffering=buffering, encoding=encoding, errors=errors,
            newline=newline, closefd=closefd, opener=opener) as csvfile:
        data = csv.DictReader(csvfile, delimiter=delimiter, quotechar=quotechar)
        if csv_max_row:
            # Lazily cap the number of rows without reading the remainder.
            data = itertools.islice(data, csv_max_row)
        # column_names re-opens the file by name to read the header row.
        return MainSelection(data,
                             columns=column_names(csvfile.name, cls, quotechar, delimiter,
                                                  mode, buffering, encoding, errors,
                                                  newline, closefd, opener),
                             outputfiled=outputfile, cls=cls, typetransfer=typetransfer)


def loaddir(f, cls=BaseRow, outputfile=None, delimiter=",", quotechar='"', mode='r', buffering=-1,
            encoding="utf-8", errors=None, newline=None, closefd=True, opener=None, typetransfer=True,
            csv_size_max=None, filetype="*.csv"):
    """Loads a directory of .csv files into a single selection.

    :param f: Directory path prefix; ``filetype`` is appended verbatim to
        form the glob pattern (e.g. ``"data/" + "*.csv"``).
    :param cls: The class that will be used for csv data.
    :param csv_size_max: If given, raises the csv module's field size limit.
    :returns: A :class:`MainSelection` containing rows from every file.
    """
    if csv_size_max:
        csv_size_limit(csv_size_max)
    data = []
    columns = None
    for path in glob.glob(f + filetype):
        if columns is None:
            # Bug fix: the header was previously read with column_names'
            # defaults, silently ignoring the delimiter/quotechar/open()
            # options given to loaddir.  The reserved-name check is done
            # once, below, instead of per-file.
            columns = column_names(path, cls, quotechar, delimiter, mode, buffering,
                                   encoding, errors, newline, closefd, opener,
                                   check_columns=False)
        with open(path, mode=mode, buffering=buffering, encoding=encoding, errors=errors,
                  newline=newline, closefd=closefd, opener=opener) as csvfile:
            # extend() instead of repeated list concatenation (O(n) vs O(n^2)).
            data.extend(csv.DictReader(csvfile, delimiter=delimiter, quotechar=quotechar))
    if columns:
        # Guard: with no matching files, columns is None and the original
        # forbidden_columns(None) raised a confusing TypeError.
        forbidden_columns(columns)
    return MainSelection(data, columns=columns, outputfiled=outputfile,
                         cls=cls, typetransfer=typetransfer)


def loads(csvdoc, columns=None, cls=BaseRow, outputfile=None, delimiter=",", quotechar='"',
          typetransfer=True, csv_size_max=None, newline="\n"):
    """Loads csv data from an in-memory string (or pre-parsed iterable).

    :param csvdoc: A csv document as a string, or an iterable of row dicts.
    :param columns: Optional explicit column names; when omitted and
        *csvdoc* is a string, they are taken from its first line.
    :param newline: Line separator used to split a string document.
    :returns: A :class:`MainSelection` wrapping the parsed rows.
    """
    was_str = isinstance(csvdoc, str)
    if csv_size_max:
        csv_size_limit(csv_size_max)
    if was_str:
        data = csv.DictReader(csvdoc.split(newline),
                              delimiter=delimiter, quotechar=quotechar)
        if not columns:
            # Read the header row from a fresh reader over the same text.
            header_reader = csv.reader(csvdoc.split(newline),
                                       delimiter=delimiter, quotechar=quotechar)
            columns = tuple(next(header_reader))
    else:
        data = csvdoc
    if columns:
        forbidden_columns(columns)
    elif isinstance(csvdoc, dict):
        forbidden_columns(csvdoc.keys())
    return MainSelection(data, columns=columns, outputfiled=outputfile,
                         cls=cls, typetransfer=typetransfer)


def new(cls=BaseRow, columns=None, outputfile=None, csv_size_max=None):
    """Creates an empty psv selection with no rows.

    :param cls: The row class used for rows added later.
    :param columns: Optional column names; checked against reserved names.
    :param csv_size_max: If given, raises the csv module's field size limit.
    :returns: An empty :class:`MainSelection`.
    """
    if csv_size_max:
        csv_size_limit(csv_size_max)
    if columns:
        forbidden_columns(columns)
    return MainSelection(columns=columns, outputfiled=outputfile, cls=cls)


def column_names(f, cls=BaseRow, quotechar='"', delimiter=",", mode='r', buffering=-1, encoding="utf-8",
                 errors=None, newline=None, closefd=True, opener=None,
                 csv_size_max=None, check_columns=True):
    """Reads the header row of a csv file and returns the column names.

    :param f: Path of the csv file.
    :param delimiter: Field separator used to parse the header.
    :param check_columns: When True, reserved column names raise ValueError.
    :param csv_size_max: If given, raises the csv module's field size limit.
    :returns: Tuple of column-name strings.
    """
    if csv_size_max:
        csv_size_limit(csv_size_max)
    with open(f, mode=mode, buffering=buffering, encoding=encoding, errors=errors,
              newline=newline, closefd=closefd, opener=opener) as csvfile:
        # Bug fix: the delimiter parameter was previously ignored -- the
        # header was always split on a hard-coded ','.
        columns = next(csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar))
    if check_columns:
        forbidden_columns(columns)
    return tuple(columns)

def forbidden_columns(columns):
    """Raises ValueError if any name in *columns* is a reserved psv column."""
    for column in columns:
        if column in banned_columns:
            raise ValueError(msg.forbidden_column(column))
from .main import load, loads, loaddir, new, csv_size_limit, column_names
18 changes: 0 additions & 18 deletions psv/core/exceptions/__init__.py
Original file line number Diff line number Diff line change
@@ -1,21 +1,3 @@
from . import messages


class PSV_BASE_EXCEPTION(Exception):
    """Root of the psv exception hierarchy; catch this for any psv error."""
    pass


class RowError(PSV_BASE_EXCEPTION):
    """Raised for errors involving individual row objects."""
    pass


class FlagError(RowError):
    """Raised for errors involving a row's output flag handling."""
    pass


class SelectionError(PSV_BASE_EXCEPTION):
    """Raised for errors involving selection objects."""
    pass


class ApiError(SelectionError):
    """Raised for errors at the top-level api-object layer."""
    pass
4 changes: 2 additions & 2 deletions psv/core/objects/apiobjects.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,8 +60,8 @@ def columns(self, v):
self.__columns__ = v


def addrow(self, columns=None, cls=BaseRow, **kwargs):
r = parser_addrow(columns if columns else self.__columns__, cls, self.__columnsmap__)
def addrow(self, cls=BaseRow, **kwargs):
r = parser_addrow(self.__columns__, cls, self.__columnsmap__)
self.__rows__.append(r)
if kwargs:
for k, v in kwargs.items():
Expand Down
13 changes: 7 additions & 6 deletions psv/core/objects/rowobjects.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,11 +52,10 @@ def __repr__(self):
)

def __str__(self):
rv = "<'{rowname}':{columnamount}>".format(
return "<'{rowname}':{columnamount}>".format(
rowname=self.__class__.__name__,
columnamount=len(self.keys())
)
return rv

def __pos__(self):
self.outputrow = True
Expand Down Expand Up @@ -96,9 +95,10 @@ def __setattr__(self, attr, v):
"""Allows setting of rows and attributes by using =
statement"""
s = cleanup_name(attr)
if attr in self["__psvcolumnstracker__"].keys():
keys = self["__psvcolumnstracker__"].keys()
if attr in keys:
self[self["__psvcolumnstracker__"][attr]] = v
elif s in self["__psvcolumnstracker__"].keys():
elif s in keys:
raise AttributeError((
"{}{}"
.format(
Expand All @@ -122,9 +122,10 @@ def __delattr__(self, attr):
"""Allows deletion of rows and attributes (Makes a row empty) by using
del statement"""
s = cleanup_name(attr)
if attr in self["__psvcolumnstracker__"].keys():
keys = self["__psvcolumnstracker__"].keys()
if attr in keys:
self[self["__psvcolumnstracker__"][attr]] = ""
elif s in self["__psvcolumnstracker__"].keys():
elif s in keys:
raise AttributeError((
"{}{}"
.format(
Expand Down
12 changes: 6 additions & 6 deletions psv/core/objects/selections.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
from ..output import outputfile, outputstr
from ..utils import multiple_index, limit_text
from ..utils import _index_function_gen, asciireplace, generate_func, generate_func_any
from ..exceptions import SelectionError
from ..exceptions.messages import ApiObjectMsg as msg

from types import FunctionType
Expand Down Expand Up @@ -52,7 +51,7 @@ def _merge(self, args):
for key in keys:
yield master[key]

def merge(self, *args, force_saftey=True):
def merge(self, *args, force_safety=True):
"""Merges selections
Note: This merge's algorithm relies on the uniqueness of the rows.
Expand All @@ -63,8 +62,9 @@ def merge(self, *args, force_saftey=True):
"""
try:
if (not all(self.__apimother__ is x.__apimother__ for x in args)) and force_saftey:
raise TypeError("Merge by default only accepts rows from same origin")
if force_safety:
if (not all(self.__apimother__ is x.__apimother__ for x in args)):
raise ValueError("Merge by default only accepts rows from same origin")
return Selection(tuple(self._merge(args)), self.__apimother__)
except TypeError as exc:
raise TypeError(
Expand All @@ -87,7 +87,7 @@ def non_hash_merge(self, *args):
state of this selection.
"""
if not all(self.__apimother__ is x.__apimother__ for x in args):
raise Exception("non_hash_merge only accepts rows from same origin")
raise ValueError("non_hash_merge only accepts rows from same origin")
outputstore = tuple(x.__output__ for x in self.__apimother__)
self.__apimother__.no_output()
for x in ((self,) + args):
Expand Down Expand Up @@ -260,7 +260,7 @@ def grab(self, *args):
elif len(arg) == 1:
return tuple(self[arg[0]])
else:
raise SelectionError(msg.badgrab)
raise ValueError(msg.badgrab)

def remove_duplicates(self, soft=True):
"""Removes duplicates.
Expand Down
33 changes: 1 addition & 32 deletions psv/core/output/__init__.py
Original file line number Diff line number Diff line change
@@ -1,32 +1 @@
import csv
import io
from types import FunctionType


def outputfile(fileloc, rows, columnnames, quote_all=True, encoding="utf-8"):
    """Write rows whose ``outputrow`` flag is true to a csv file at *fileloc*."""
    if not (isinstance(columnnames, list) or isinstance(columnnames, tuple)):
        raise ValueError(
            "Provided Columns must be a list was {}".format(type(columnnames)))
    # newline='' is required so the csv module controls line endings itself.
    with open(fileloc, 'w', encoding=encoding, newline='') as csvfile:
        fieldnames = columnnames
        # NOTE(review): the bare 0 is csv.QUOTE_MINIMAL.
        writer = csv.DictWriter(
            csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL if quote_all else 0)
        writer.writeheader()
        for x in rows:
            # Only rows flagged for output are written.
            if x.outputrow:
                writer.writerow(x.longcolumn(columnnames))


def outputstr(rows, columnnames, quote_all, encoding="utf-8"):
    """Serialize rows whose ``outputrow`` flag is true to a csv string."""
    if not (isinstance(columnnames, list) or isinstance(columnnames, tuple)):
        raise ValueError(
            "Provided Columns must be a list was {}".format(type(columnnames)))
    with io.StringIO() as csvfile:
        fieldnames = columnnames
        # NOTE(review): the bare 0 is csv.QUOTE_MINIMAL.  The encoding
        # parameter is unused here -- the result is an in-memory str.
        writer = csv.DictWriter(
            csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL if quote_all else 0)
        writer.writeheader()
        for x in rows:
            # Only rows flagged for output are written.
            if x.outputrow:
                writer.writerow(x.longcolumn(columnnames))
        return csvfile.getvalue()
from .main import outputfile, outputstr
32 changes: 32 additions & 0 deletions psv/core/output/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
import csv
import io
from types import FunctionType


def outputfile(fileloc, rows, columnnames, quote_all=True, encoding="utf-8"):
    """Writes *rows* to a csv file at *fileloc*.

    Only rows whose ``outputrow`` attribute is true are written.

    :param fileloc: Destination file path.
    :param rows: Iterable of row objects exposing ``outputrow`` and
        ``longcolumn(columns)``.
    :param columnnames: list or tuple of column names for the header.
    :param quote_all: Quote every field when True, else minimal quoting.
    :param encoding: Text encoding of the output file.
    :raises ValueError: If *columnnames* is not a list or tuple.
    """
    if not isinstance(columnnames, (list, tuple)):
        raise ValueError(
            "Provided Columns must be a list was {}".format(type(columnnames)))
    # Named constant instead of the magic 0 (csv.QUOTE_MINIMAL).
    quoting = csv.QUOTE_ALL if quote_all else csv.QUOTE_MINIMAL
    # newline='' is required so the csv module controls line endings itself.
    with open(fileloc, 'w', encoding=encoding, newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=columnnames, quoting=quoting)
        writer.writeheader()
        for row in rows:
            if row.outputrow:
                writer.writerow(row.longcolumn(columnnames))


def outputstr(rows, columnnames, quote_all, encoding="utf-8"):
    """Serializes *rows* to a csv-formatted string.

    Only rows whose ``outputrow`` attribute is true are included.

    :param rows: Iterable of row objects exposing ``outputrow`` and
        ``longcolumn(columns)``.
    :param columnnames: list or tuple of column names for the header.
    :param quote_all: Quote every field when True, else minimal quoting.
    :param encoding: Unused; kept for interface symmetry with outputfile.
    :returns: The csv document as a :class:`str`.
    :raises ValueError: If *columnnames* is not a list or tuple.
    """
    if not isinstance(columnnames, (list, tuple)):
        raise ValueError(
            "Provided Columns must be a list was {}".format(type(columnnames)))
    # Named constant instead of the magic 0 (csv.QUOTE_MINIMAL).
    quoting = csv.QUOTE_ALL if quote_all else csv.QUOTE_MINIMAL
    with io.StringIO() as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=columnnames, quoting=quoting)
        writer.writeheader()
        for row in rows:
            if row.outputrow:
                writer.writerow(row.longcolumn(columnnames))
        # getvalue() must run before the StringIO is closed by the with-block.
        return csvfile.getvalue()
22 changes: 1 addition & 21 deletions psv/core/parsing/__init__.py
Original file line number Diff line number Diff line change
@@ -1,21 +1 @@
"""This file contains all csv/excel parsing code"""

from ..utils import translate_type
from ..objects import cleanup_name


def parser(csvfile, cls, columns_map, typetranfer=True, *args, **kwargs):
    """This generates row objects for csv, and sets them up
    for dynamic access"""
    for row in csvfile:
        if typetranfer:
            # Pass every raw value through translate_type before building
            # the row object.
            yield cls({(x): translate_type(row[x])
                       for x in row.keys()}, columns_map, *args, **kwargs)
        else:
            # No conversion: hand the raw mapping straight to cls.
            yield cls(row, columns_map, *args, **kwargs)


def parser_addrow(columns, cls, columns_map, typetranfer=True, *args, **kwargs):
    """Builds an empty row object with every column preset to ""."""
    r = cls({}, columns_map, *args, **kwargs)
    r.update(({(x): "" for x in columns}))
    return r
from .main import parser, parser_addrow
21 changes: 21 additions & 0 deletions psv/core/parsing/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
"""This file contains all csv/excel parsing code"""

from ..utils import translate_type
from ..objects import cleanup_name


def parser(csvfile, cls, columns_map, typetranfer=True, *args, **kwargs):
    """Yields one *cls* row object per record in *csvfile*.

    When *typetranfer* is true, every raw value is passed through
    translate_type before the row object is constructed; otherwise the
    record mapping is handed to *cls* unchanged.
    """
    for record in csvfile:
        if typetranfer:
            record = {key: translate_type(record[key]) for key in record}
        yield cls(record, columns_map, *args, **kwargs)


def parser_addrow(columns, cls, columns_map, typetranfer=True, *args, **kwargs):
    """Builds an empty *cls* row whose every column is preset to ""."""
    row = cls({}, columns_map, *args, **kwargs)
    row.update({column: "" for column in columns})
    return row
Loading

0 comments on commit 27b803d

Please sign in to comment.