source: rattail/rattail/batch/handlers.py @ 7b4d418

Last change on this file since 7b4d418 was 7b4d418, checked in by Lance Edgar <ledgar@…>, 9 months ago

Add "calculated" invoice total for receiving row, batch

so then invoice_total is meant to reflect the "original" total as obtained
from the invoice proper, whereas invoice_total_calculated is up to us

  • Property mode set to 100644
File size: 25.2 KB
# -*- coding: utf-8; -*-
################################################################################
#
#  Rattail -- Retail Software Framework
#  Copyright © 2010-2018 Lance Edgar
#
#  This file is part of Rattail.
#
#  Rattail is free software: you can redistribute it and/or modify it under the
#  terms of the GNU General Public License as published by the Free Software
#  Foundation, either version 3 of the License, or (at your option) any later
#  version.
#
#  Rattail is distributed in the hope that it will be useful, but WITHOUT ANY
#  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
#  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
#  details.
#
#  You should have received a copy of the GNU General Public License along with
#  Rattail.  If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
"""
Data Batch Handlers
"""

from __future__ import unicode_literals, absolute_import

import os
import shutil
import datetime
import warnings

from sqlalchemy import orm

from rattail.core import Object
from rattail.db.cache import cache_model
from rattail.time import localtime, make_utc
from rattail.util import progress_loop, load_object


class BatchHandler(object):
    """
    Base class and partial default implementation for batch handlers.  It is
    expected that all batch handlers will ultimately inherit from this base
    class; it therefore defines the implementation "interface", loosely
    speaking.  Custom batch handlers are welcome to supplement or override this
    as needed, and in fact must do so for certain aspects.

    .. attribute:: populate_batches

       Simple flag to indicate whether any/all batches being handled will
       require initial population from a relevant data source.  Note that this
       flag should be set to ``True`` if *any* batches may need population.
       Whether or not a given batch actually needs to be populated is
       ultimately determined by the :meth:`should_populate()` method.

    .. attribute:: populate_with_versioning

       This flag indicates whether it's okay for data versioning to be enabled
       during initial batch population.

       If set to ``True`` (the default), then versioning is allowed and
       therefore the caller need take no special precautions when populating
       the batch.

       If set to ``False`` then versioning is *not* allowed; if versioning is
       not enabled for the current process, the caller may populate the batch
       with no special precautions.  However if versioning *is* enabled, the
       caller must launch a separate process with versioning disabled, in order
       to populate the batch.

    .. attribute:: refresh_with_versioning

       This flag indicates whether it's okay for data versioning to be enabled
       during batch refresh.

       If set to ``True`` (the default), then versioning is allowed and
       therefore the caller need take no special precautions when refreshing
       the batch.

       If set to ``False`` then versioning is *not* allowed; if versioning is
       not enabled for the current process, the caller may refresh the batch
       with no special precautions.  However if versioning *is* enabled, the
       caller must launch a separate process with versioning disabled, in order
       to refresh the batch.

    .. attribute:: execute_with_versioning

       This flag indicates whether it's okay for data versioning to be enabled
       during batch execution.

       If set to ``True`` (the default), then versioning is allowed and
       therefore the caller need take no special precautions when executing
       the batch.

       If set to ``False`` then versioning is *not* allowed; if versioning is
       not enabled for the current process, the caller may execute the batch
       with no special precautions.  However if versioning *is* enabled, the
       caller must launch a separate process with versioning disabled, in order
       to execute the batch.

    .. attribute:: repopulate_when_refresh

       Flag to indicate that when a batch is refreshed, the first step of the
       refresh should be to re-populate the batch.  The flag is ``False`` by
       default, in which case the batch is *not* repopulated, i.e. the refresh
       will work with existing batch rows.
    """
    populate_batches = False
    populate_with_versioning = True

    refresh_with_versioning = True
    repopulate_when_refresh = False

    execute_with_versioning = True

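    # Illustrative sketch (not part of the original file): a custom handler
    # typically points ``batch_model_class`` at its batch model, sets whichever
    # flags apply, and overrides a few methods.  ``MyBatch`` and the method
    # bodies below are hypothetical:
    #
    #     class MyBatchHandler(BatchHandler):
    #         batch_model_class = MyBatch
    #         populate_batches = True
    #
    #         def populate(self, batch, progress=None):
    #             ...   # fill the batch with rows; see populate() below
    #
    #         def execute(self, batch, progress=None, **kwargs):
    #             ...   # push the batch contents to their final destination
    #             return True
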
    def __init__(self, config):
        self.config = config
        self.enum = config.get_enum()

    @property
    def batch_model_class(self):
        """
        Reference to the data model class of the batch type for which this
        handler is responsible, e.g. :class:`rattail.db.model.LabelBatch`.
        Each handler must define this (or inherit from one that does).
        """
        raise NotImplementedError("You must set the 'batch_model_class' attribute "
                                  "for class '{}'".format(self.__class__.__name__))

    @property
    def batch_key(self):
        """
        The "batch type key" for the handler, e.g. ``'labels'``.  This isn't
        necessarily unique among handlers, but instead refers to a unique key
        for the type of batch being handled.  The handler needn't define this,
        as it is borrowed from :attr:`batch_model_class`.
        """
        return self.batch_model_class.batch_key

    def get_model_title(self):
        return self.batch_model_class.get_model_title()

    def allow_versioning(self, action):
        if action == 'populate':
            return self.populate_with_versioning
        if action == 'refresh':
            return self.refresh_with_versioning
        if action == 'execute':
            return self.execute_with_versioning
        raise NotImplementedError("unknown batch action: {}".format(action))

    def make_basic_batch(self, session, progress=None, **kwargs):
        """
        Make a new "basic" batch, with no customization beyond what is provided
        by ``kwargs``, which are passed directly to the batch class constructor.
        """
        kwargs.setdefault('rowcount', 0)
        kwargs.setdefault('complete', False)
        batch = self.batch_model_class(**kwargs)
        session.add(batch)
        session.flush()
        return batch

    def make_batch(self, session, progress=None, **kwargs):
        """
        Make a new batch, with initial rows if applicable.
        """
        batch = self.make_basic_batch(session, progress=progress, **kwargs)
        self.init_batch(batch, progress=progress, **kwargs)
        return batch

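    # Illustrative sketch (not part of the original file): a caller would
    # typically obtain the handler and then create/populate a batch roughly
    # like so.  The ``session`` and ``user`` objects, and the ``description``
    # kwarg, are assumptions made for the sake of the example:
    #
    #     handler = get_batch_handler(config, 'labels')
    #     batch = handler.make_batch(session, created_by=user,
    #                                description="example batch")
    #     if handler.should_populate(batch):
    #         handler.do_populate(batch, user)
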
    def init_batch(self, batch, progress=None, **kwargs):
        """
        Initialize the batch in whatever way might make sense.  Whether this is
        required at all is up to the batch handler etc.
        """

    def add_row(self, batch, row):
        """
        Add the given row to the given batch.  This assumes the row is a *new*
        row which does not yet belong to a batch.  The row is refreshed as part
        of being added; if the refresh marks it as removed, it will not count
        toward the batch row count.
        """
        session = orm.object_session(batch)
        with session.no_autoflush:
            batch.data_rows.append(row)
            self.refresh_row(row)
        if not row.removed:
            batch.rowcount += 1
            self.after_add_row(batch, row)

    def after_add_row(self, batch, row):
        """
        Event hook, called immediately after the given row has been "properly"
        added to the batch.  This is a good place to update totals for the
        batch, to account for the new row, etc.
        """

    def purge_batches(self, session, before=None, before_days=90,
                      delete_all_data=True, progress=None, **kwargs):
        """
        Purge all batches which were executed prior to a given date.

        :param before: If provided, must be a timezone-aware datetime object.
           If not provided, it will be calculated from the current date, using
           ``before_days``.

        :param before_days: Number of days before the current date, to be used
           as the cutoff date if ``before`` is not specified.

        :param delete_all_data: Flag indicating whether *all* data should be
           deleted for each batch being purged.  This flag is passed along to
           :meth:`delete()`; see that for more info.

        :returns: Integer indicating the number of batches purged.
        """
        if not before:
            before = localtime(self.config).date() - datetime.timedelta(days=before_days)
            before = datetime.datetime.combine(before, datetime.time(0))
            before = localtime(self.config, before)

        old_batches = session.query(self.batch_model_class)\
                             .filter(self.batch_model_class.executed < before)\
                             .options(orm.joinedload(self.batch_model_class.data_rows))
        result = Object()
        result.purged = 0

        def purge(batch, i):
            self.delete(batch, delete_all_data=delete_all_data, progress=progress)
            session.delete(batch)
            result.purged += 1
            if i % 5 == 0:
                session.flush()

        self.progress_loop(purge, old_batches, progress,
                           message="Purging old batches")

        session.flush()
        return result.purged

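    # Illustrative sketch (not part of the original file): purging batches
    # executed more than a year ago, while skipping file deletion when in a
    # (hypothetical) dry-run mode:
    #
    #     purged = handler.purge_batches(session, before_days=365,
    #                                    delete_all_data=not dry_run)
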
    @property
    def root_datadir(self):
        """
        The absolute path of the root folder in which data for this particular
        type of batch is stored.  The structure of this path is as follows:

        .. code-block:: none

           /{root_batch_data_dir}/{batch_type_key}

        * ``{root_batch_data_dir}`` - Value of the 'batch.files' option in the
          [rattail] section of the config file.
        * ``{batch_type_key}`` - Unique key for the relevant type of batch.

        .. note::
           While it is likely that the data folder returned by this method
           already exists, this method does not guarantee it.
        """
        return self.config.batch_filedir(self.batch_key)

    def datadir(self, batch):
        """
        Returns the absolute path of the folder in which the batch's source
        data file(s) reside.  Note that the batch must already have been
        persisted to the database.  The structure of the path returned is as
        follows:

        .. code-block:: none

           /{root_datadir}/{uuid[:2]}/{uuid[2:]}

        * ``{root_datadir}`` - Value returned by :meth:`root_datadir()`.
        * ``{uuid[:2]}`` - First two characters of batch UUID.
        * ``{uuid[2:]}`` - All batch UUID characters *after* the first two.

        .. note::
           While it is likely that the data folder returned by this method
           already exists, this method does not guarantee any such thing.  It
           is typically assumed that the path will have been created by a
           previous call to :meth:`make_batch()` however.
        """
        return os.path.join(self.root_datadir, batch.uuid[:2], batch.uuid[2:])

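    # For illustration only: given a (hypothetical) batch UUID of
    # 'abcdef1234567890' and a root data dir of '/srv/rattail/batch/labels',
    # the above would yield:
    #
    #     /srv/rattail/batch/labels/ab/cdef1234567890
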
    def make_datadir(self, batch):
        """
        Returns the data folder specific to the given batch, creating it if necessary.
        """
        datadir = self.datadir(batch)
        os.makedirs(datadir)
        return datadir

    # TODO: remove default attr?
    def set_input_file(self, batch, path, attr='filename'):
        """
        Assign the data file found at ``path`` to the batch.  This overwrites
        the given attribute (``attr``) of the batch and places a copy of the
        data file in the batch's data folder.
        """
        datadir = self.make_datadir(batch)
        filename = os.path.basename(path)
        shutil.copyfile(path, os.path.join(datadir, filename))
        setattr(batch, attr, filename)

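    # Illustrative sketch (not part of the original file): attaching a source
    # file to a new batch; the path shown is hypothetical:
    #
    #     handler.set_input_file(batch, '/tmp/vendor-invoice.csv')
    #     # copies the file into datadir(batch) and sets batch.filename
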
    def should_populate(self, batch):
        """
        Must return a boolean indicating whether the given batch should be
        populated from an initial data source, i.e. at time of batch creation.
        Override this method if you need to inspect the batch in order to
        determine whether the populate step is needed.  Default behavior is to
        simply return the value of :attr:`populate_batches`.
        """
        return self.populate_batches

    def setup_populate(self, batch, progress=None):
        """
        Perform any setup (caching etc.) necessary for populating a batch.
        """

    def teardown_populate(self, batch, progress=None):
        """
        Perform any teardown (cleanup etc.) necessary after populating a batch.
        """

    def do_populate(self, batch, user, progress=None):
        """
        Perform initial population for the batch, i.e. fill it with data rows.
        Where the handler obtains the data for this will vary greatly.

        Note that callers *should* use this method, but custom batch handlers
        should *not* override this method.  Conversely, custom handlers
        *should* override the :meth:`~populate()` method, but callers should
        *not* use that one directly.
        """
        self.setup_populate(batch, progress=progress)
        self.populate(batch, progress=progress)
        self.teardown_populate(batch, progress=progress)
        self.refresh_batch_status(batch)
        return True

    def populate(self, batch, progress=None):
        """
        Populate the batch with initial data rows.  It is assumed that the data
        source to be used will be known by inspecting various properties of the
        batch itself.

        Note that callers should *not* use this method, but custom batch
        handlers *should* override this method.  Conversely, custom handlers
        should *not* override the :meth:`~do_populate()` method, but callers
        *should* use that one directly.
        """
        raise NotImplementedError("Please implement `{}.populate()` method".format(batch.__class__.__name__))

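    # Illustrative sketch (not part of the original file): a typical
    # ``populate()`` override loops over some data source and feeds rows to
    # ``add_row()``.  The ``load_source_items()`` helper and the row field
    # shown are hypothetical:
    #
    #     def populate(self, batch, progress=None):
    #
    #         def append(item, i):
    #             row = batch.row_class()
    #             row.item_id = item.id
    #             self.add_row(batch, row)
    #
    #         self.progress_loop(append, load_source_items(batch), progress,
    #                            message="Populating batch rows")
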
    def refreshable(self, batch):
        """
        This method should return a boolean indicating whether or not the
        handler supports a "refresh" operation for the batch, given its current
        condition.  The default assumes a refresh is allowed unless the batch
        is executed.

        Note that this (currently) only affects the enabled/disabled state of
        the Refresh button within the Tailbone batch view.
        """
        if batch.executed:
            return False
        return True

    def progress_loop(self, *args, **kwargs):
        return progress_loop(*args, **kwargs)

    def setup_refresh(self, batch, progress=None):
        """
        Perform any setup (caching etc.) necessary for refreshing a batch.
        """

    def teardown_refresh(self, batch, progress=None):
        """
        Perform any teardown (cleanup etc.) necessary after refreshing a batch.
        """

    def do_refresh(self, batch, user, progress=None):
        self.refresh(batch, progress=progress)
        return True

    def refresh(self, batch, progress=None):
        """
        Perform a full data refresh for the batch.  What exactly this means
        will depend on the type of batch and specific handler logic.

        Generally speaking this refresh is meant to use queries etc. to obtain
        "fresh" data for the batch (header) and all its rows.  In most cases
        certain data is expected to be "core" to the batch and/or rows, and
        such data will be left intact, with all *other* data values being
        re-calculated and/or reset etc.
        """
        session = orm.object_session(batch)
        self.setup_refresh(batch, progress=progress)
        if self.repopulate_when_refresh:
            del batch.data_rows[:]
            batch.rowcount = 0
            session.flush()
            self.populate(batch, progress=progress)
        else:
            batch.rowcount = 0

            def refresh(row, i):
                with session.no_autoflush:
                    self.refresh_row(row)
                if not row.removed:
                    batch.rowcount += 1

            self.progress_loop(refresh, batch.active_rows(), progress,
                               message="Refreshing batch data rows")
        self.refresh_batch_status(batch)
        self.teardown_refresh(batch, progress=progress)
        return True

    def refresh_row(self, row):
        """
        This method will be passed a row object which has already been properly
        added to a batch, and which has basic required fields already
        populated.  This method is then responsible for further populating all
        applicable fields for the row, based on current data within the
        relevant system(s).

        Note that in some cases this method may be called multiple times for
        the same row, e.g. once when first creating the batch and then later
        when a user explicitly refreshes the batch.  The method logic must
        account for this possibility.
        """

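    # Illustrative sketch (not part of the original file): a refresh_row()
    # override usually looks up "live" data and (re)sets row fields from it.
    # The ``lookup_product()`` helper and the row fields/status constants
    # shown here are hypothetical:
    #
    #     def refresh_row(self, row):
    #         product = lookup_product(row.item_id)
    #         if not product:
    #             row.status_code = row.STATUS_PRODUCT_NOT_FOUND
    #             return
    #         row.product = product
    #         row.description = product.description
    #         row.status_code = row.STATUS_OK
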
    def remove_row(self, row):
        """
        Remove the given row from its batch.  This may delete the row outright
        from the database, or simply mark it as removed etc.  Defaults to the
        latter.
        """
        if row.removed:
            return
        batch = row.batch
        row.removed = True
        self.refresh_batch_status(batch)
        if batch.rowcount is not None:
            batch.rowcount -= 1

    def refresh_batch_status(self, batch):
        """
        Update the batch status, as needed...
        """

    def mark_complete(self, batch, progress=None):
        """
        Mark the given batch as "complete".  This usually is just a matter of
        setting the :attr:`~rattail.db.model.batch.BatchMixin.complete` flag
        for the batch, with the idea that this should "freeze" the batch so
        that another user can verify its state before finally executing it.

        Each handler is of course free to expound on this idea, or to add extra
        logic to this "event" of marking a batch complete.
        """
        batch.complete = True

    def mark_incomplete(self, batch, progress=None):
        """
        Mark the given batch as "incomplete" (aka. pending).  This usually is
        just a matter of clearing the
        :attr:`~rattail.db.model.batch.BatchMixin.complete` flag for the batch,
        with the idea that this should "thaw" the batch so that it may be
        further updated, i.e. it's not yet ready to execute.

        Each handler is of course free to expound on this idea, or to add extra
        logic to this "event" of marking a batch incomplete.
        """
        batch.complete = False

    def why_not_execute(self, batch):
        """
        This method should return a string indicating the reason why the given
        batch should not be considered executable.  By default it returns
        ``None`` which means the batch *is* to be considered executable.

        Note that it is assumed the batch has not already been executed, since
        execution is globally prevented for such batches.
        """

    def executable(self, batch):
        """
        This method should return a boolean indicating whether or not execution
        should be allowed for the batch, given its current condition.  The
        default returns ``True`` unless the batch has already been executed, or
        :meth:`why_not_execute()` gives a reason not to; override as needed.

        Note that this (currently) only affects the enabled/disabled state of
        the Execute button within the Tailbone batch view.
        """
        if batch is None:
            return True
        if batch.executed:
            return False
        if self.why_not_execute(batch):
            return False
        return True

    def auto_executable(self, batch):
        """
        Must return a boolean indicating whether the given batch is eligible
        for "automatic" execution, i.e. immediately after the batch is created.
        """
        return False

    def do_execute(self, batch, user, progress=None, **kwargs):
        """
        Perform final execution for the batch.  What that means for any given
        batch will vary greatly.

        Note that callers *should* use this method, but custom batch handlers
        should *not* override this method.  Conversely, custom handlers
        *should* override the :meth:`~execute()` method, but callers should
        *not* use that one directly.
        """
        result = self.execute(batch, user=user, progress=progress, **kwargs)
        if not result:
            return False
        batch.executed = make_utc()
        batch.executed_by = user
        return result

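    # Illustrative sketch (not part of the original file): a caller executing
    # a batch.  The ``session`` and ``user`` objects are assumptions here:
    #
    #     if handler.executable(batch):
    #         handler.do_execute(batch, user, progress=progress)
    #         session.flush()
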
    def execute(self, batch, progress=None, **kwargs):
        """
        Execute the given batch, with given progress and kwargs.  That is an
        intentionally generic statement; its meaning must be further defined by
        the handler subclass, since the default implementation merely raises
        ``NotImplementedError``.

        Note that callers should *not* use this method, but custom batch
        handlers *should* override this method.  Conversely, custom handlers
        should *not* override the :meth:`~do_execute()` method, but callers
        *should* use that one directly.
        """
        raise NotImplementedError

    def execute_many(self, batches, progress=None, **kwargs):
        """
        Execute a set of batches, with given progress and kwargs.  Default
        behavior is to simply execute each batch in succession.  Any batches
        which are already executed are skipped.
        """
        now = make_utc()
        for batch in batches:
            if not batch.executed:
                self.execute(batch, progress=progress, **kwargs)
                batch.executed = now
                batch.executed_by = kwargs['user']
        return True

    def delete(self, batch, delete_all_data=True, progress=None, **kwargs):
        """
        Delete all data for the batch, including any related (e.g. row)
        records, as well as files on disk etc.  This method should *not* delete
        the batch itself however.

        :param delete_all_data: Flag indicating whether *all* data should be
           deleted.  You should probably set this to ``False`` if in dry-run
           mode, since deleting *all* data often implies deleting files from
           disk, which is not transactional and therefore can't be rolled back.
        """
        if delete_all_data:
            if hasattr(batch, 'delete_data'):
                batch.delete_data(self.config)
        if hasattr(batch, 'data_rows'):
            del batch.data_rows[:]

    def setup_clone(self, oldbatch, progress=None):
        """
        Perform any setup (caching etc.) necessary for cloning a batch.  Note
        that the ``oldbatch`` arg is the "old" batch, i.e. the one from which a
        clone is to be created.
        """

    def teardown_clone(self, newbatch, progress=None):
        """
        Perform any teardown (cleanup etc.) necessary after cloning a batch.
        Note that the ``newbatch`` arg is the "new" batch, i.e. the one which
        was just created by cloning the old batch.
        """

    def clone(self, oldbatch, created_by, progress=None):
        """
        Clone the given batch as a new batch, and return the new batch.
        """
        self.setup_clone(oldbatch, progress=progress)
        batch_class = self.batch_model_class
        batch_mapper = orm.class_mapper(batch_class)

        newbatch = batch_class()
        newbatch.created_by = created_by
        newbatch.rowcount = 0
        for name in batch_mapper.columns.keys():
            if name not in ('uuid', 'id', 'created', 'created_by_uuid', 'rowcount', 'executed', 'executed_by_uuid'):
                setattr(newbatch, name, getattr(oldbatch, name))

        session = orm.object_session(oldbatch)
        session.add(newbatch)
        session.flush()

        row_class = newbatch.row_class
        row_mapper = orm.class_mapper(row_class)

        def clone_row(oldrow, i):
            newrow = self.clone_row(oldrow)
            self.add_row(newbatch, newrow)

        self.progress_loop(clone_row, oldbatch.data_rows, progress,
                           message="Cloning data rows for new batch")

        self.refresh_batch_status(newbatch)
        self.teardown_clone(newbatch, progress=progress)
        return newbatch

    def clone_row(self, oldrow):
        row_class = self.batch_model_class.row_class
        row_mapper = orm.class_mapper(row_class)
        newrow = row_class()
        for name in row_mapper.columns.keys():
            if name not in ('uuid', 'batch_uuid', 'sequence'):
                setattr(newrow, name, getattr(oldrow, name))
        return newrow

    def cache_model(self, session, model, **kwargs):
        return cache_model(session, model, **kwargs)


def get_batch_types(config):
    """
    Returns the list of available batch type keys.
    """
    model = config.get_model()

    keys = []
    for name in dir(model):
        if name == 'BatchMixin':
            continue
        obj = getattr(model, name)
        if isinstance(obj, type):
            if issubclass(obj, model.Base):
                if issubclass(obj, model.BatchMixin):
                    keys.append(obj.batch_key)

    keys.sort()
    return keys


def get_batch_handler(config, batch_key, default=None, error=True):
    """
    Returns a batch handler object corresponding to the given batch key.
    """
    spec = config.get('rattail.batch', '{}.handler'.format(batch_key), default=default)
    if error and not spec:
        raise ValueError("handler spec not found for batch type: {}".format(batch_key))
    handler = load_object(spec)(config)
    return handler
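
# Illustrative sketch (not part of the original file): the handler spec is
# read from the [rattail.batch] section of config, keyed by batch type.  So a
# config file might contain something like (the spec shown is hypothetical):
#
#     [rattail.batch]
#     labels.handler = poser.batch.labels:LabelBatchHandler
#
# after which ``get_batch_handler(config, 'labels')`` would load that class
# via ``load_object()`` and instantiate it with the config object.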