Module pyinotify

Source Code for Module pyinotify

   1  #!/usr/bin/env python 
   2  # -*- coding: iso-8859-1 -*- 
   3  # 
   4  # pyinotify.py - python interface to inotify 
   5  # Copyright (C) Sebastien Martini <sebastien.martini@gmail.com> 
   6  # 
   7  # This program is free software; you can redistribute it and/or 
   8  # modify it under the terms of the GNU General Public License 
   9  # as published by the Free Software Foundation; either version 2 
  10  # of the License, or (at your option) any later version. 
  11  # 
  12  # This program is distributed in the hope that it will be useful, 
  13  # but WITHOUT ANY WARRANTY; without even the implied warranty of 
  14  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the 
  15  # GNU General Public License for more details. 
  16  # 
  17  # You should have received a copy of the GNU General Public License 
  18  # along with this program; if not, write to the Free Software 
  19  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  20  # 02110-1301, USA. 
  21   
  22  """ 
  23  pyinotify 
  24   
  25  @author: Sebastien Martini 
  26  @license: GPLv2+ 
  27  @contact: seb@dbzteam.org 
  28  """ 
29 30 -class PyinotifyError(Exception):
31 """Indicates exceptions raised by a Pyinotify class.""" 32 pass
33
34 35 -class UnsupportedPythonVersionError(PyinotifyError):
36 """ 37 Raised on unsupported Python versions. 38 """
39 - def __init__(self, version):
40 """ 41 @param version: Current Python version 42 @type version: string 43 """ 44 PyinotifyError.__init__(self, 45 ('Python %s is unsupported, requires ' 46 'at least Python 2.4') % version)
47
48 49 -class UnsupportedLibcVersionError(PyinotifyError):
50 """ 51 Raised on unsupported libc versions. 52 """
53 - def __init__(self, version):
54 """ 55 @param version: Current Libc version 56 @type version: string 57 """ 58 PyinotifyError.__init__(self, 59 ('Libc %s is not supported, requires ' 60 'at least Libc 2.4') % version)
61 62 63 # Check Python version 64 import sys 65 if sys.version < '2.4': 66 raise UnsupportedPythonVersionError(sys.version) 67 68 69 # Import directives 70 import threading 71 import os 72 import select 73 import struct 74 import fcntl 75 import errno 76 import termios 77 import array 78 import logging 79 import atexit 80 from collections import deque 81 from datetime import datetime, timedelta 82 import time 83 import fnmatch 84 import re 85 import ctypes 86 import ctypes.util 87 import asyncore 88 89 __author__ = "seb@dbzteam.org (Sebastien Martini)" 90 91 __version__ = "0.8.8" 92 93 __metaclass__ = type # Use new-style classes by default 94 95 96 # Compatibity mode: set to True to improve compatibility with 97 # Pyinotify 0.7.1. Do not set this variable yourself, call the 98 # function compatibility_mode() instead. 99 COMPATIBILITY_MODE = False 100 101 102 # Load libc 103 LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c')) 104 105 # The libc version > 2.4 check. 106 # XXX: Maybe it is better to check if the libc has the needed functions inside? 107 # Because there are inotify patches for libc 2.3.6. 108 LIBC.gnu_get_libc_version.restype = ctypes.c_char_p 109 LIBC_VERSION = LIBC.gnu_get_libc_version() 110 if (int(LIBC_VERSION.split('.')[0]) < 2 or 111 (int(LIBC_VERSION.split('.')[0]) == 2 and 112 int(LIBC_VERSION.split('.')[1]) < 4)): 113 raise UnsupportedLibcVersionError(LIBC_VERSION)
114 115 116 -class PyinotifyLogger(logging.Logger):
117 """ 118 Pyinotify logger used for logging unicode strings. 119 """
120 - def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, 121 extra=None):
122 rv = UnicodeLogRecord(name, level, fn, lno, msg, args, exc_info, func) 123 if extra is not None: 124 for key in extra: 125 if (key in ["message", "asctime"]) or (key in rv.__dict__): 126 raise KeyError("Attempt to overwrite %r in LogRecord" % key) 127 rv.__dict__[key] = extra[key] 128 return rv
129
130 131 -class UnicodeLogRecord(logging.LogRecord):
132 - def __init__(self, name, level, pathname, lineno, 133 msg, args, exc_info, func=None):
134 py_version = sys.version_info 135 # func argument was added in Python 2.5, just ignore it otherwise. 136 if py_version[0] >= 2 and py_version[1] >= 5: 137 logging.LogRecord.__init__(self, name, level, pathname, lineno, 138 msg, args, exc_info, func) 139 else: 140 logging.LogRecord.__init__(self, name, level, pathname, lineno, 141 msg, args, exc_info)
142
143 - def getMessage(self):
144 msg = self.msg 145 if not isinstance(msg, (unicode, str)): 146 try: 147 msg = str(self.msg) 148 except UnicodeError: 149 pass 150 if self.args: 151 if isinstance(self.args, tuple): 152 def str_to_unicode(s): 153 """Return unicode string.""" 154 if not isinstance(s, str): 155 return s 156 return unicode(s, sys.getfilesystemencoding())
157 args = tuple([str_to_unicode(m) for m in self.args]) 158 else: 159 args = self.args 160 msg = msg % args 161 if not isinstance(msg, unicode): 162 msg = unicode(msg, sys.getfilesystemencoding()) 163 return msg
164
165 166 # Logging 167 -def logger_init():
168 """Initialize logger instance.""" 169 logging.setLoggerClass(PyinotifyLogger) 170 log = logging.getLogger("pyinotify") 171 console_handler = logging.StreamHandler() 172 console_handler.setFormatter( 173 logging.Formatter("[Pyinotify %(levelname)s] %(message)s")) 174 log.addHandler(console_handler) 175 log.setLevel(20) 176 return log
177 178 log = logger_init()
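# --- Illustrative sketch (editor's addition, not part of the original -------
# source): 'log' above is a plain logging.Logger named "pyinotify", so a
# caller may tune its verbosity after importing the module.  This helper is
# only defined as an example and is never called by pyinotify itself.
def _example_enable_debug_logging():
    # Lower the level from the default 20 (INFO) to DEBUG so that internal
    # messages such as raw event dumps and watch updates become visible.
    log.setLevel(logging.DEBUG)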
179 180 181 182 ### inotify's variables ### 183 184 185 -class SysCtlINotify:
186 """ 187 Access (read, write) inotify's variables through sysctl. Usually it 188 requires administrator rights to update them. 189 190 Examples: 191 - Read max_queued_events attribute: myvar = max_queued_events.value 192 - Update max_queued_events attribute: max_queued_events.value = 42 193 """ 194 195 inotify_attrs = {'max_user_instances': 1, 196 'max_user_watches': 2, 197 'max_queued_events': 3} 198
199 - def __init__(self, attrname):
200 sino = ctypes.c_int * 3 201 self._attrname = attrname 202 self._attr = sino(5, 20, SysCtlINotify.inotify_attrs[attrname])
203
204 - def get_val(self):
205 """ 206 Gets attribute's value. 207 208 @return: stored value. 209 @rtype: int 210 """ 211 oldv = ctypes.c_int(0) 212 size = ctypes.c_int(ctypes.sizeof(oldv)) 213 LIBC.sysctl(self._attr, 3, 214 ctypes.c_voidp(ctypes.addressof(oldv)), 215 ctypes.addressof(size), 216 None, 0) 217 return oldv.value
218
219 - def set_val(self, nval):
220 """ 221 Sets new attribute's value. 222 223 @param nval: replaces current value by nval. 224 @type nval: int 225 """ 226 oldv = ctypes.c_int(0) 227 sizeo = ctypes.c_int(ctypes.sizeof(oldv)) 228 newv = ctypes.c_int(nval) 229 sizen = ctypes.c_int(ctypes.sizeof(newv)) 230 LIBC.sysctl(self._attr, 3, 231 ctypes.c_voidp(ctypes.addressof(oldv)), 232 ctypes.addressof(sizeo), 233 ctypes.c_voidp(ctypes.addressof(newv)), 234 ctypes.addressof(sizen))
235 236 value = property(get_val, set_val) 237
238 - def __repr__(self):
239 return '<%s=%d>' % (self._attrname, self.get_val())
240 241 242 # Singleton instances 243 # 244 # read: myvar = max_queued_events.value 245 # update: max_queued_events.value = 42 246 # 247 for attrname in ('max_queued_events', 'max_user_instances', 'max_user_watches'): 248 globals()[attrname] = SysCtlINotify(attrname)
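# --- Illustrative sketch (editor's addition, not part of the original -------
# source): reading and updating the inotify limits through the sysctl
# singletons created above.  Reading works for any user, assigning usually
# requires root privileges; the value 32768 is only an example.
def _example_tune_inotify_limits():
    print 'max_queued_events :', max_queued_events.value
    print 'max_user_watches  :', max_user_watches.value
    print 'max_user_instances:', max_user_instances.value
    # Raise the event queue limit (needs sufficient privileges):
    # max_queued_events.value = 32768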
249 250 251 252 ### iglob ### 253 254 255 # Code taken from standard Python Lib, slightly modified in order to work 256 # with pyinotify (don't exclude dotted files/dirs like .foo). 257 # Original version: 258 # @see: http://svn.python.org/projects/python/trunk/Lib/glob.py 259 260 -def iglob(pathname):
261 if not has_magic(pathname): 262 if hasattr(os.path, 'lexists'): 263 if os.path.lexists(pathname): 264 yield pathname 265 else: 266 if os.path.islink(pathname) or os.path.exists(pathname): 267 yield pathname 268 return 269 dirname, basename = os.path.split(pathname) 270 # relative pathname 271 if not dirname: 272 return 273 # absolute pathname 274 if has_magic(dirname): 275 dirs = iglob(dirname) 276 else: 277 dirs = [dirname] 278 if has_magic(basename): 279 glob_in_dir = glob1 280 else: 281 glob_in_dir = glob0 282 for dirname in dirs: 283 for name in glob_in_dir(dirname, basename): 284 yield os.path.join(dirname, name)
285
286 -def glob1(dirname, pattern):
287 if not dirname: 288 dirname = os.curdir 289 try: 290 names = os.listdir(dirname) 291 except os.error: 292 return [] 293 return fnmatch.filter(names, pattern)
294
295 -def glob0(dirname, basename):
296 if basename == '' and os.path.isdir(dirname): 297 # `os.path.split()` returns an empty basename for paths ending with a 298 # directory separator. 'q*x/' should match only directories. 299 return [basename] 300 if hasattr(os.path, 'lexists'): 301 if os.path.lexists(os.path.join(dirname, basename)): 302 return [basename] 303 else: 304 if (os.path.islink(os.path.join(dirname, basename)) or 305 os.path.exists(os.path.join(dirname, basename))): 306 return [basename] 307 return []
308 309 MAGIC_CHECK = re.compile('[*?[]')
310 311 -def has_magic(s):
312 return MAGIC_CHECK.search(s) is not None
313
314 315 316 ### Core ### 317 318 319 -class EventsCodes:
320 """ 321 Set of codes corresponding to each kind of events. 322 Some of these flags are used to communicate with inotify, whereas 323 the others are sent to userspace by inotify notifying some events. 324 325 @cvar IN_ACCESS: File was accessed. 326 @type IN_ACCESS: int 327 @cvar IN_MODIFY: File was modified. 328 @type IN_MODIFY: int 329 @cvar IN_ATTRIB: Metadata changed. 330 @type IN_ATTRIB: int 331 @cvar IN_CLOSE_WRITE: Writtable file was closed. 332 @type IN_CLOSE_WRITE: int 333 @cvar IN_CLOSE_NOWRITE: Unwrittable file closed. 334 @type IN_CLOSE_NOWRITE: int 335 @cvar IN_OPEN: File was opened. 336 @type IN_OPEN: int 337 @cvar IN_MOVED_FROM: File was moved from X. 338 @type IN_MOVED_FROM: int 339 @cvar IN_MOVED_TO: File was moved to Y. 340 @type IN_MOVED_TO: int 341 @cvar IN_CREATE: Subfile was created. 342 @type IN_CREATE: int 343 @cvar IN_DELETE: Subfile was deleted. 344 @type IN_DELETE: int 345 @cvar IN_DELETE_SELF: Self (watched item itself) was deleted. 346 @type IN_DELETE_SELF: int 347 @cvar IN_MOVE_SELF: Self (watched item itself) was moved. 348 @type IN_MOVE_SELF: int 349 @cvar IN_UNMOUNT: Backing fs was unmounted. 350 @type IN_UNMOUNT: int 351 @cvar IN_Q_OVERFLOW: Event queued overflowed. 352 @type IN_Q_OVERFLOW: int 353 @cvar IN_IGNORED: File was ignored. 354 @type IN_IGNORED: int 355 @cvar IN_ONLYDIR: only watch the path if it is a directory (new 356 in kernel 2.6.15). 357 @type IN_ONLYDIR: int 358 @cvar IN_DONT_FOLLOW: don't follow a symlink (new in kernel 2.6.15). 359 IN_ONLYDIR we can make sure that we don't watch 360 the target of symlinks. 361 @type IN_DONT_FOLLOW: int 362 @cvar IN_MASK_ADD: add to the mask of an already existing watch (new 363 in kernel 2.6.14). 364 @type IN_MASK_ADD: int 365 @cvar IN_ISDIR: Event occurred against dir. 366 @type IN_ISDIR: int 367 @cvar IN_ONESHOT: Only send event once. 368 @type IN_ONESHOT: int 369 @cvar ALL_EVENTS: Alias for considering all of the events. 370 @type ALL_EVENTS: int 371 """ 372 373 # The idea here is 'configuration-as-code' - this way, we get our nice class 374 # constants, but we also get nice human-friendly text mappings to do lookups 375 # against as well, for free: 376 FLAG_COLLECTIONS = {'OP_FLAGS': { 377 'IN_ACCESS' : 0x00000001, # File was accessed 378 'IN_MODIFY' : 0x00000002, # File was modified 379 'IN_ATTRIB' : 0x00000004, # Metadata changed 380 'IN_CLOSE_WRITE' : 0x00000008, # Writable file was closed 381 'IN_CLOSE_NOWRITE' : 0x00000010, # Unwritable file closed 382 'IN_OPEN' : 0x00000020, # File was opened 383 'IN_MOVED_FROM' : 0x00000040, # File was moved from X 384 'IN_MOVED_TO' : 0x00000080, # File was moved to Y 385 'IN_CREATE' : 0x00000100, # Subfile was created 386 'IN_DELETE' : 0x00000200, # Subfile was deleted 387 'IN_DELETE_SELF' : 0x00000400, # Self (watched item itself) 388 # was deleted 389 'IN_MOVE_SELF' : 0x00000800, # Self (watched item itself) was moved 390 }, 391 'EVENT_FLAGS': { 392 'IN_UNMOUNT' : 0x00002000, # Backing fs was unmounted 393 'IN_Q_OVERFLOW' : 0x00004000, # Event queued overflowed 394 'IN_IGNORED' : 0x00008000, # File was ignored 395 }, 396 'SPECIAL_FLAGS': { 397 'IN_ONLYDIR' : 0x01000000, # only watch the path if it is a 398 # directory 399 'IN_DONT_FOLLOW' : 0x02000000, # don't follow a symlink 400 'IN_MASK_ADD' : 0x20000000, # add to the mask of an already 401 # existing watch 402 'IN_ISDIR' : 0x40000000, # event occurred against dir 403 'IN_ONESHOT' : 0x80000000, # only send event once 404 }, 405 } 406
407 - def maskname(mask):
408 """ 409 Returns the event name associated to mask. IN_ISDIR is appended to 410 the result when appropriate. Note: only one event is returned, because 411 only one event can be raised at a given time. 412 413 @param mask: mask. 414 @type mask: int 415 @return: event name. 416 @rtype: str 417 """ 418 ms = mask 419 name = '%s' 420 if mask & IN_ISDIR: 421 ms = mask - IN_ISDIR 422 name = '%s|IN_ISDIR' 423 return name % EventsCodes.ALL_VALUES[ms]
424 425 maskname = staticmethod(maskname)
426 427 428 # So let's now turn the configuration into code 429 EventsCodes.ALL_FLAGS = {} 430 EventsCodes.ALL_VALUES = {} 431 for flagc, valc in EventsCodes.FLAG_COLLECTIONS.iteritems(): 432 # Make the collections' members directly accessible through the 433 # class dictionary 434 setattr(EventsCodes, flagc, valc) 435 436 # Collect all the flags under a common umbrella 437 EventsCodes.ALL_FLAGS.update(valc) 438 439 # Make the individual masks accessible as 'constants' at globals() scope 440 # and masknames accessible by values. 441 for name, val in valc.iteritems(): 442 globals()[name] = val 443 EventsCodes.ALL_VALUES[val] = name 444 445 446 # all 'normal' events 447 ALL_EVENTS = reduce(lambda x, y: x | y, EventsCodes.OP_FLAGS.itervalues()) 448 EventsCodes.ALL_FLAGS['ALL_EVENTS'] = ALL_EVENTS 449 EventsCodes.ALL_VALUES[ALL_EVENTS] = 'ALL_EVENTS'
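# --- Illustrative sketch (editor's addition, not part of the original -------
# source): combining event flags into a watch mask and decoding a received
# mask back to its symbolic name with EventsCodes.maskname().
def _example_event_masks():
    mask = IN_CREATE | IN_DELETE | IN_MOVED_TO   # events we want to watch
    print hex(mask)
    # maskname() names a single event and appends IN_ISDIR when the
    # directory bit is set.
    print EventsCodes.maskname(IN_CREATE | IN_ISDIR)   # 'IN_CREATE|IN_ISDIR'
    print EventsCodes.maskname(IN_CLOSE_WRITE)         # 'IN_CLOSE_WRITE'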
450 451 452 -class _Event:
453 """ 454 Event structure, represent events raised by the system. This 455 is the base class and should be subclassed. 456 457 """
458 - def __init__(self, dict_):
459 """ 460 Attach attributes (contained in dict_) to self. 461 462 @param dict_: Set of attributes. 463 @type dict_: dictionary 464 """ 465 for tpl in dict_.iteritems(): 466 setattr(self, *tpl)
467
468 - def __repr__(self):
469 """ 470 @return: Generic event string representation. 471 @rtype: str 472 """ 473 s = '' 474 for attr, value in sorted(self.__dict__.items(), key=lambda x: x[0]): 475 if attr.startswith('_'): 476 continue 477 if attr == 'mask': 478 value = hex(getattr(self, attr)) 479 elif isinstance(value, basestring) and not value: 480 value = "''" 481 s += ' %s%s%s' % (Color.field_name(attr), 482 Color.punctuation('='), 483 Color.field_value(value)) 484 485 s = '%s%s%s %s' % (Color.punctuation('<'), 486 Color.class_name(self.__class__.__name__), 487 s, 488 Color.punctuation('>')) 489 return s
490
491 492 -class _RawEvent(_Event):
493 """ 494 Raw event, it contains only the informations provided by the system. 495 It doesn't infer anything. 496 """
497 - def __init__(self, wd, mask, cookie, name):
498 """ 499 @param wd: Watch Descriptor. 500 @type wd: int 501 @param mask: Bitmask of events. 502 @type mask: int 503 @param cookie: Cookie. 504 @type cookie: int 505 @param name: Basename of the file or directory against which the 506 event was raised in case where the watched directory 507 is the parent directory. None if the event was raised 508 on the watched item itself. 509 @type name: string or None 510 """ 511 # name: remove trailing '\0' 512 super(_RawEvent, self).__init__({'wd': wd, 513 'mask': mask, 514 'cookie': cookie, 515 'name': name.rstrip('\0')}) 516 log.debug(repr(self))
517
518 519 -class Event(_Event):
520 """ 521 This class contains all the useful informations about the observed 522 event. However, the presence of each field is not guaranteed and 523 depends on the type of event. In effect, some fields are irrelevant 524 for some kind of event (for example 'cookie' is meaningless for 525 IN_CREATE whereas it is mandatory for IN_MOVE_TO). 526 527 The possible fields are: 528 - wd (int): Watch Descriptor. 529 - mask (int): Mask. 530 - maskname (str): Readable event name. 531 - path (str): path of the file or directory being watched. 532 - name (str): Basename of the file or directory against which the 533 event was raised in case where the watched directory 534 is the parent directory. None if the event was raised 535 on the watched item itself. This field is always provided 536 even if the string is ''. 537 - pathname (str): Concatenation of 'path' and 'name'. 538 - src_pathname (str): Only present for IN_MOVED_TO events and only in 539 the case where IN_MOVED_FROM events are watched too. Holds the 540 source pathname from where pathname was moved from. 541 - cookie (int): Cookie. 542 - dir (bool): True if the event was raised against a directory. 543 544 """
545 - def __init__(self, raw):
546 """ 547 Concretely, this is the raw event plus inferred infos. 548 """ 549 _Event.__init__(self, raw) 550 self.maskname = EventsCodes.maskname(self.mask) 551 if COMPATIBILITY_MODE: 552 self.event_name = self.maskname 553 try: 554 if self.name: 555 self.pathname = os.path.abspath(os.path.join(self.path, 556 self.name)) 557 else: 558 self.pathname = os.path.abspath(self.path) 559 except AttributeError, err: 560 log.error(err)
561
562 563 -class ProcessEventError(PyinotifyError):
564 """ 565 ProcessEventError Exception. Raised on ProcessEvent error. 566 """
567 - def __init__(self, err):
568 """ 569 @param err: Exception error description. 570 @type err: string 571 """ 572 PyinotifyError.__init__(self, err)
573
574 575 -class _ProcessEvent:
576 """ 577 Abstract processing event class. 578 """
579 - def __call__(self, event):
580 """ 581 To behave like a functor the object must be callable. 582 This method is a dispatch method. Its lookup order is: 583 1. process_MASKNAME method 584 2. process_FAMILY_NAME method 585 3. otherwise calls process_default 586 587 @param event: Event to be processed. 588 @type event: Event object 589 @return: By convention when used from the ProcessEvent class: 590 - Returning False or None (default value) means keep on 591 executing next chained functors (see chain.py example). 592 - Returning True instead means do not execute next 593 processing functions. 594 @rtype: bool 595 @raise ProcessEventError: Event object undispatchable, 596 unknown event. 597 """ 598 stripped_mask = event.mask - (event.mask & IN_ISDIR) 599 maskname = EventsCodes.ALL_VALUES.get(stripped_mask) 600 if maskname is None: 601 raise ProcessEventError("Unknown mask 0x%08x" % stripped_mask) 602 603 # 1- look for process_MASKNAME 604 meth = getattr(self, 'process_' + maskname, None) 605 if meth is not None: 606 return meth(event) 607 # 2- look for process_FAMILY_NAME 608 meth = getattr(self, 'process_IN_' + maskname.split('_')[1], None) 609 if meth is not None: 610 return meth(event) 611 # 3- default call method process_default 612 return self.process_default(event)
613
614 - def __repr__(self):
615 return '<%s>' % self.__class__.__name__
616
617 618 -class _SysProcessEvent(_ProcessEvent):
619 """ 620 There is three kind of processing according to each event: 621 622 1. special handling (deletion from internal container, bug, ...). 623 2. default treatment: which is applied to the majority of events. 624 3. IN_ISDIR is never sent alone, he is piggybacked with a standard 625 event, he is not processed as the others events, instead, its 626 value is captured and appropriately aggregated to dst event. 627 """
628 - def __init__(self, wm, notifier):
629 """ 630 631 @param wm: Watch Manager. 632 @type wm: WatchManager instance 633 @param notifier: Notifier. 634 @type notifier: Notifier instance 635 """ 636 self._watch_manager = wm # watch manager 637 self._notifier = notifier # notifier 638 self._mv_cookie = {} # {cookie(int): (src_path(str), date), ...} 639 self._mv = {} # {src_path(str): (dst_path(str), date), ...}
640
641 - def cleanup(self):
642 """ 643 Cleanup (delete) old (>1mn) records contained in self._mv_cookie 644 and self._mv. 645 """ 646 date_cur_ = datetime.now() 647 for seq in [self._mv_cookie, self._mv]: 648 for k in seq.keys(): 649 if (date_cur_ - seq[k][1]) > timedelta(minutes=1): 650 log.debug('Cleanup: deleting entry %s', seq[k][0]) 651 del seq[k]
652
653 - def process_IN_CREATE(self, raw_event):
654 """ 655 If the event affects a directory and the auto_add flag of the 656 targetted watch is set to True, a new watch is added on this 657 new directory, with the same attribute values than those of 658 this watch. 659 """ 660 if raw_event.mask & IN_ISDIR: 661 watch_ = self._watch_manager.get_watch(raw_event.wd) 662 if watch_.auto_add: 663 addw = self._watch_manager.add_watch 664 newwd = addw(os.path.join(watch_.path, raw_event.name), 665 watch_.mask, proc_fun=watch_.proc_fun, 666 rec=False, auto_add=watch_.auto_add) 667 668 # Trick to handle mkdir -p /t1/t2/t3 where t1 is watched and 669 # t2 and t3 are created. 670 # Since the directory is new, then everything inside it 671 # must also be new. 672 base = os.path.join(watch_.path, raw_event.name) 673 if newwd[base] > 0: 674 for name in os.listdir(base): 675 inner = os.path.join(base, name) 676 if (os.path.isdir(inner) and 677 self._watch_manager.get_wd(inner) is None): 678 # Generate (simulate) creation event for sub 679 # directories. 680 rawevent = _RawEvent(newwd[base], 681 IN_CREATE | IN_ISDIR, 682 0, name) 683 self._notifier.append_event(rawevent) 684 return self.process_default(raw_event)
685
686 - def process_IN_MOVED_FROM(self, raw_event):
687 """ 688 Map the cookie with the source path (+ date for cleaning). 689 """ 690 watch_ = self._watch_manager.get_watch(raw_event.wd) 691 path_ = watch_.path 692 src_path = os.path.normpath(os.path.join(path_, raw_event.name)) 693 self._mv_cookie[raw_event.cookie] = (src_path, datetime.now()) 694 return self.process_default(raw_event, {'cookie': raw_event.cookie})
695
696 - def process_IN_MOVED_TO(self, raw_event):
697 """ 698 Map the source path with the destination path (+ date for 699 cleaning). 700 """ 701 watch_ = self._watch_manager.get_watch(raw_event.wd) 702 path_ = watch_.path 703 dst_path = os.path.normpath(os.path.join(path_, raw_event.name)) 704 mv_ = self._mv_cookie.get(raw_event.cookie) 705 to_append = {'cookie': raw_event.cookie} 706 if mv_ is not None: 707 self._mv[mv_[0]] = (dst_path, datetime.now()) 708 # Let's assume that IN_MOVED_FROM event is always queued before 709 # that its associated (they share a common cookie) IN_MOVED_TO 710 # event is queued itself. It is then possible in that scenario 711 # to provide as additional information to the IN_MOVED_TO event 712 # the original pathname of the moved file/directory. 713 to_append['src_pathname'] = mv_[0] 714 elif raw_event.mask & IN_ISDIR and watch_.auto_add: 715 # We got a diretory that's "moved in" from an unknown source and 716 # auto_add is enabled. Manually add watches to the inner subtrees. 717 self._watch_manager.add_watch(dst_path, watch_.mask, 718 proc_fun=watch_.proc_fun, 719 rec=True, auto_add=True) 720 return self.process_default(raw_event, to_append)
721
722 - def process_IN_MOVE_SELF(self, raw_event):
723 """ 724 STATUS: the following bug has been fixed in recent kernels (FIXME: 725 which version ?). Now it raises IN_DELETE_SELF instead. 726 727 Old kernels were bugged, this event raised when the watched item 728 were moved, so we had to update its path, but under some circumstances 729 it was impossible: if its parent directory and its destination 730 directory wasn't watched. The kernel (see include/linux/fsnotify.h) 731 doesn't bring us enough informations like the destination path of 732 moved items. 733 """ 734 watch_ = self._watch_manager.get_watch(raw_event.wd) 735 src_path = watch_.path 736 mv_ = self._mv.get(src_path) 737 if mv_: 738 dest_path = mv_[0] 739 watch_.path = dest_path 740 src_path_len = len(src_path) 741 sep_len = len(os.path.sep) 742 # The next loop renames all watches with src_path as base path. 743 # It seems that IN_MOVE_SELF does not provide IN_ISDIR information 744 # therefore the next loop is iterated even if raw_event is a file. 745 for w in self._watch_manager.watches.itervalues(): 746 if w.path.startswith(src_path): 747 # Note that dest_path is a normalized path. 748 w.path = os.path.join(dest_path, 749 w.path[src_path_len + sep_len:]) 750 else: 751 log.error("The pathname '%s' of this watch %s has probably changed " 752 "and couldn't be updated, so it cannot be trusted " 753 "anymore. To fix this error move directories/files only " 754 "between watched parents directories, in this case e.g. " 755 "put a watch on '%s'.", 756 watch_.path, watch_, 757 os.path.normpath(os.path.join(watch_.path, 758 os.path.pardir))) 759 if not watch_.path.endswith('-unknown-path'): 760 watch_.path += '-unknown-path' 761 return self.process_default(raw_event)
762
763 - def process_IN_Q_OVERFLOW(self, raw_event):
764 """ 765 Only signal an overflow, most of the common flags are irrelevant 766 for this event (path, wd, name). 767 """ 768 return Event({'mask': raw_event.mask})
769
770 - def process_IN_IGNORED(self, raw_event):
771 """ 772 The watch descriptor raised by this event is now ignored (forever), 773 it can be safely deleted from the watch manager dictionary. 774 After this event we can be sure that neither the event queue nor 775 the system will raise an event associated to this wd again. 776 """ 777 event_ = self.process_default(raw_event) 778 self._watch_manager.del_watch(raw_event.wd) 779 return event_
780
781 - def process_default(self, raw_event, to_append=None):
782 """ 783 Commons handling for the followings events: 784 785 IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_WRITE, IN_CLOSE_NOWRITE, 786 IN_OPEN, IN_DELETE, IN_DELETE_SELF, IN_UNMOUNT. 787 """ 788 watch_ = self._watch_manager.get_watch(raw_event.wd) 789 if raw_event.mask & (IN_DELETE_SELF | IN_MOVE_SELF): 790 # Unfornulately this information is not provided by the kernel 791 dir_ = watch_.dir 792 else: 793 dir_ = bool(raw_event.mask & IN_ISDIR) 794 dict_ = {'wd': raw_event.wd, 795 'mask': raw_event.mask, 796 'path': watch_.path, 797 'name': raw_event.name, 798 'dir': dir_} 799 if COMPATIBILITY_MODE: 800 dict_['is_dir'] = dir_ 801 if to_append is not None: 802 dict_.update(to_append) 803 return Event(dict_)
804
805 806 -class ProcessEvent(_ProcessEvent):
807 """ 808 Process events objects, can be specialized via subclassing, thus its 809 behavior can be overriden: 810 811 Note: you should not override __init__ in your subclass instead define 812 a my_init() method, this method will be called from the constructor of 813 this class with its optional parameters. 814 815 1. Provide specialized individual methods, e.g. process_IN_DELETE for 816 processing a precise type of event (e.g. IN_DELETE in this case). 817 2. Or/and provide methods for processing events by 'family', e.g. 818 process_IN_CLOSE method will process both IN_CLOSE_WRITE and 819 IN_CLOSE_NOWRITE events (if process_IN_CLOSE_WRITE and 820 process_IN_CLOSE_NOWRITE aren't defined though). 821 3. Or/and override process_default for catching and processing all 822 the remaining types of events. 823 """ 824 pevent = None 825
826 - def __init__(self, pevent=None, **kargs):
827 """ 828 Enable chaining of ProcessEvent instances. 829 830 @param pevent: Optional callable object, will be called on event 831 processing (before self). 832 @type pevent: callable 833 @param kargs: This constructor is a implemented as a template method 834 delegating its optionals keyworded arguments to the method 835 my_init(). 836 @type kargs: dict 837 """ 838 self.pevent = pevent 839 self.my_init(**kargs)
840
841 - def my_init(self, **kargs):
842 """ 843 This method is called from base constructor ProcessEvent.__init__(). 844 This method is useless here and is meant to be redifined in a 845 subclass of ProcessEvent. In effect, when subclassing just override 846 this method if you want to provide custom initialization to your 847 subclass' instance. You MUST pass keyword arguments though. 848 849 @param kargs: optional delegated arguments from __init__(). 850 @type kargs: dict 851 """ 852 pass
853
854 - def __call__(self, event):
855 stop_chaining = False 856 if self.pevent is not None: 857 # By default methods return None, so the guideline is that 858 # methods asking to stop the chaining must explicitly 859 # return a non-None and non-False value; otherwise the default 860 # behavior is to keep chaining the call to the corresponding 861 # local method. 862 stop_chaining = self.pevent(event) 863 if not stop_chaining: 864 return _ProcessEvent.__call__(self, event)
865
866 - def nested_pevent(self):
867 return self.pevent
868
869 - def process_IN_Q_OVERFLOW(self, event):
870 """ 871 By default this method only reports warning messages, you can overredide 872 it by subclassing ProcessEvent and implement your own 873 process_IN_Q_OVERFLOW method. The actions you can take on receiving this 874 event is either to update the variable max_queued_events in order to 875 handle more simultaneous events or to modify your code in order to 876 accomplish a better filtering diminishing the number of raised events. 877 Because this method is defined, IN_Q_OVERFLOW will never get 878 transmitted as arguments to process_default calls. 879 880 @param event: IN_Q_OVERFLOW event. 881 @type event: dict 882 """ 883 log.warning('Event queue overflowed.')
884
885 - def process_default(self, event):
886 """ 887 Default processing event method. By default does nothing. Subclass 888 ProcessEvent and redefine this method in order to modify its behavior. 889 890 @param event: Event to be processed. Can be of any type of events but 891 IN_Q_OVERFLOW events (see method process_IN_Q_OVERFLOW). 892 @type event: Event instance 893 """ 894 pass
895
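# --- Illustrative sketch (editor's addition, not part of the original -------
# source): a ProcessEvent subclass showing the dispatch order implemented in
# _ProcessEvent.__call__: the exact process_MASKNAME method wins, then the
# family method (process_IN_CLOSE covers both IN_CLOSE_* events), then
# process_default.  The 'label' keyword only illustrates my_init() usage.
class _ExampleHandler(ProcessEvent):
    def my_init(self, label='example'):
        # Keyword arguments passed to the constructor end up here.
        self._label = label

    def process_IN_CREATE(self, event):
        print self._label, 'created:', event.pathname

    def process_IN_CLOSE(self, event):
        # Called for IN_CLOSE_WRITE and IN_CLOSE_NOWRITE alike.
        print self._label, 'closed:', event.pathname

    def process_default(self, event):
        print self._label, 'other event:', event.maskname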
896 897 -class PrintAllEvents(ProcessEvent):
898 """ 899 Dummy class used to print events strings representations. For instance this 900 class is used from command line to print all received events to stdout. 901 """
902 - def my_init(self, out=None):
903 """ 904 @param out: Where events will be written. 905 @type out: Object providing a valid file object interface. 906 """ 907 if out is None: 908 out = sys.stdout 909 self._out = out
910
911 - def process_default(self, event):
912 """ 913 Writes event string representation to file object provided to 914 my_init(). 915 916 @param event: Event to be processed. Can be of any type of events but 917 IN_Q_OVERFLOW events (see method process_IN_Q_OVERFLOW). 918 @type event: Event instance 919 """ 920 self._out.write(repr(event) + '\n')
921
922 923 -class ChainIfTrue(ProcessEvent):
924 """ 925 Makes conditional chaining depending on the result of the nested 926 processing instance. 927 """
928 - def my_init(self, func):
929 """ 930 Method automatically called from base class constructor. 931 """ 932 self._func = func
933
934 - def process_default(self, event):
935 return not self._func(event)
936
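# --- Illustrative sketch (editor's addition, not part of the original -------
# source): conditional chaining.  A ChainIfTrue instance passed as the
# 'pevent' of another handler lets only events for which the predicate
# returns True reach that handler; everything else stops the chain.
def _example_conditional_chain():
    def is_txt(event):
        # event.name may be None for events raised on the watched item itself.
        return (event.name or '').endswith('.txt')
    # PrintAllEvents will only run for '*.txt' basenames.
    return PrintAllEvents(pevent=ChainIfTrue(func=is_txt))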
937 938 -class Stats(ProcessEvent):
939 """ 940 Compute and display trivial statistics about processed events. 941 """
942 - def my_init(self):
943 """ 944 Method automatically called from base class constructor. 945 """ 946 self._start_time = time.time() 947 self._stats = {} 948 self._stats_lock = threading.Lock()
949
950 - def process_default(self, event):
951 """ 952 Processes |event|. 953 """ 954 self._stats_lock.acquire() 955 try: 956 events = event.maskname.split('|') 957 for event_name in events: 958 count = self._stats.get(event_name, 0) 959 self._stats[event_name] = count + 1 960 finally: 961 self._stats_lock.release()
962
963 - def _stats_copy(self):
964 self._stats_lock.acquire() 965 try: 966 return self._stats.copy() 967 finally: 968 self._stats_lock.release()
969
970 - def __repr__(self):
971 stats = self._stats_copy() 972 973 elapsed = int(time.time() - self._start_time) 974 elapsed_str = '' 975 if elapsed < 60: 976 elapsed_str = str(elapsed) + 'sec' 977 elif 60 <= elapsed < 3600: 978 elapsed_str = '%dmn%dsec' % (elapsed / 60, elapsed % 60) 979 elif 3600 <= elapsed < 86400: 980 elapsed_str = '%dh%dmn' % (elapsed / 3600, (elapsed % 3600) / 60) 981 elif elapsed >= 86400: 982 elapsed_str = '%dd%dh' % (elapsed / 86400, (elapsed % 86400) / 3600) 983 stats['ElapsedTime'] = elapsed_str 984 985 l = [] 986 for ev, value in sorted(stats.items(), key=lambda x: x[0]): 987 l.append(' %s=%s' % (Color.field_name(ev), 988 Color.field_value(value))) 989 s = '<%s%s >' % (Color.class_name(self.__class__.__name__), 990 ''.join(l)) 991 return s
992
993 - def dump(self, filename):
994 """ 995 Dumps statistics to file |filename|. 996 997 @param filename: pathname. 998 @type filename: string 999 """ 1000 file_obj = file(filename, 'wb') 1001 try: 1002 file_obj.write(str(self)) 1003 finally: 1004 file_obj.close()
1005
1006 - def __str__(self, scale=45):
1007 stats = self._stats_copy() 1008 if not stats: 1009 return '' 1010 1011 m = max(stats.values()) 1012 unity = int(round(float(m) / scale)) or 1 1013 fmt = '%%-26s%%-%ds%%s' % (len(Color.field_value('@' * scale)) 1014 + 1) 1015 def func(x): 1016 return fmt % (Color.field_name(x[0]), 1017 Color.field_value('@' * (x[1] / unity)), 1018 Color.simple('%d' % x[1], 'yellow'))
1019 s = '\n'.join(map(func, sorted(stats.items(), key=lambda x: x[0]))) 1020 return s
1021
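# --- Illustrative sketch (editor's addition, not part of the original -------
# source): using Stats as the default processing method and printing the
# one-line summary after each batch of events.  Notifier is defined further
# below; '/tmp' is only a placeholder path.
def _example_stats():
    wm = WatchManager()
    stats = Stats()
    notifier = Notifier(wm, default_proc_fun=stats)
    wm.add_watch('/tmp', ALL_EVENTS)
    # repr(stats) is the compact summary, str(stats) the ASCII histogram.
    notifier.loop(callback=lambda n: sys.stdout.write(repr(stats) + '\n'))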
1022 1023 -class NotifierError(PyinotifyError):
1024 """ 1025 Notifier Exception. Raised on Notifier error. 1026 1027 """
1028 - def __init__(self, err):
1029 """ 1030 @param err: Exception string's description. 1031 @type err: string 1032 """ 1033 PyinotifyError.__init__(self, err)
1034
1035 1036 -class Notifier:
1037 """ 1038 Read notifications, process events. 1039 1040 """
1041 - def __init__(self, watch_manager, default_proc_fun=ProcessEvent(), 1042 read_freq=0, threshold=0, timeout=None):
1043 """ 1044 Initialization. read_freq, threshold and timeout parameters are used 1045 when looping. 1046 1047 @param watch_manager: Watch Manager. 1048 @type watch_manager: WatchManager instance 1049 @param default_proc_fun: Default processing method. 1050 @type default_proc_fun: instance of ProcessEvent 1051 @param read_freq: if read_freq == 0, events are read asap, 1052 if read_freq is > 0, this thread sleeps 1053 max(0, read_freq - timeout) seconds. But if 1054 timeout is None it can be different because 1055 poll is blocking waiting for something to read. 1056 @type read_freq: int 1057 @param threshold: File descriptor will be read only if the accumulated 1058 size to read becomes >= threshold. If != 0, you likely 1059 want to use it in combination with an appropriate 1060 value for read_freq because without that you would 1061 keep looping without really reading anything and that 1062 until the amount of events to read is >= threshold. 1063 At least with read_freq set you might sleep. 1064 @type threshold: int 1065 @param timeout: 1066 http://docs.python.org/lib/poll-objects.html#poll-objects 1067 @type timeout: int 1068 """ 1069 # Watch Manager instance 1070 self._watch_manager = watch_manager 1071 # File descriptor 1072 self._fd = self._watch_manager.get_fd() 1073 # Poll object and registration 1074 self._pollobj = select.poll() 1075 self._pollobj.register(self._fd, select.POLLIN) 1076 # This pipe is correctely initialized and used by ThreadedNotifier 1077 self._pipe = (-1, -1) 1078 # Event queue 1079 self._eventq = deque() 1080 # System processing functor, common to all events 1081 self._sys_proc_fun = _SysProcessEvent(self._watch_manager, self) 1082 # Default processing method 1083 self._default_proc_fun = default_proc_fun 1084 # Loop parameters 1085 self._read_freq = read_freq 1086 self._threshold = threshold 1087 self._timeout = timeout
1088
1089 - def append_event(self, event):
1090 """ 1091 Append a raw event to the event queue. 1092 1093 @param event: An event. 1094 @type event: _RawEvent instance. 1095 """ 1096 self._eventq.append(event)
1097
1098 - def proc_fun(self):
1099 return self._default_proc_fun
1100
1101 - def check_events(self, timeout=None):
1102 """ 1103 Check for new events available to read, blocks up to timeout 1104 milliseconds. 1105 1106 @param timeout: If specified it overrides the corresponding instance 1107 attribute _timeout. 1108 @type timeout: int 1109 1110 @return: New events to read. 1111 @rtype: bool 1112 """ 1113 while True: 1114 try: 1115 # blocks up to 'timeout' milliseconds 1116 if timeout is None: 1117 timeout = self._timeout 1118 ret = self._pollobj.poll(timeout) 1119 except select.error, err: 1120 if err[0] == errno.EINTR: 1121 continue # interrupted, retry 1122 else: 1123 raise 1124 else: 1125 break 1126 1127 if not ret or (self._pipe[0] == ret[0][0]): 1128 return False 1129 # only one fd is polled 1130 return ret[0][1] & select.POLLIN
1131
1132 - def read_events(self):
1133 """ 1134 Read events from device, build _RawEvents, and enqueue them. 1135 """ 1136 buf_ = array.array('i', [0]) 1137 # get event queue size 1138 if fcntl.ioctl(self._fd, termios.FIONREAD, buf_, 1) == -1: 1139 return 1140 queue_size = buf_[0] 1141 if queue_size < self._threshold: 1142 log.debug('(fd: %d) %d bytes available to read but threshold is ' 1143 'fixed to %d bytes', self._fd, queue_size, 1144 self._threshold) 1145 return 1146 1147 try: 1148 # Read content from file 1149 r = os.read(self._fd, queue_size) 1150 except Exception, msg: 1151 raise NotifierError(msg) 1152 log.debug('Event queue size: %d', queue_size) 1153 rsum = 0 # counter 1154 while rsum < queue_size: 1155 s_size = 16 1156 # Retrieve wd, mask, cookie 1157 s_ = struct.unpack('iIII', r[rsum:rsum+s_size]) 1158 # Length of name 1159 fname_len = s_[3] 1160 # field 'length' useless 1161 s_ = s_[:-1] 1162 # Retrieve name 1163 s_ += struct.unpack('%ds' % fname_len, 1164 r[rsum + s_size:rsum + s_size + fname_len]) 1165 self._eventq.append(_RawEvent(*s_)) 1166 rsum += s_size + fname_len
1167
1168 - def process_events(self):
1169 """ 1170 Routine for processing events from queue by calling their 1171 associated proccessing method (an instance of ProcessEvent). 1172 It also does internal processings, to keep the system updated. 1173 """ 1174 while self._eventq: 1175 raw_event = self._eventq.popleft() # pop next event 1176 watch_ = self._watch_manager.get_watch(raw_event.wd) 1177 revent = self._sys_proc_fun(raw_event) # system processings 1178 if watch_ and watch_.proc_fun: 1179 watch_.proc_fun(revent) # user processings 1180 else: 1181 self._default_proc_fun(revent) 1182 self._sys_proc_fun.cleanup() # remove olds MOVED_* events records
1183 1184
1185 - def __daemonize(self, pid_file=None, force_kill=False, stdin=os.devnull, 1186 stdout=os.devnull, stderr=os.devnull):
1187 """ 1188 pid_file: file to which the pid will be written. 1189 force_kill: if True kill the process associated to pid_file. 1190 stdin, stdout, stderr: files associated to common streams. 1191 """ 1192 if pid_file is None: 1193 dirname = '/var/run/' 1194 basename = sys.argv[0] or 'pyinotify' 1195 pid_file = os.path.join(dirname, basename + '.pid') 1196 1197 if os.path.exists(pid_file): 1198 fo = file(pid_file, 'rb') 1199 try: 1200 try: 1201 pid = int(fo.read()) 1202 except ValueError: 1203 pid = None 1204 if pid is not None: 1205 try: 1206 os.kill(pid, 0) 1207 except OSError, err: 1208 log.error(err) 1209 else: 1210 if not force_kill: 1211 s = 'There is already a pid file %s with pid %d' 1212 raise NotifierError(s % (pid_file, pid)) 1213 else: 1214 os.kill(pid, 9) 1215 finally: 1216 fo.close() 1217 1218 1219 def fork_daemon(): 1220 # Adapted from Chad J. Schroeder's recipe 1221 # @see http://code.activestate.com/recipes/278731/ 1222 pid = os.fork() 1223 if (pid == 0): 1224 # parent 2 1225 os.setsid() 1226 pid = os.fork() 1227 if (pid == 0): 1228 # child 1229 os.chdir('/') 1230 os.umask(0) 1231 else: 1232 # parent 2 1233 os._exit(0) 1234 else: 1235 # parent 1 1236 os._exit(0) 1237 1238 fd_inp = os.open(stdin, os.O_RDONLY) 1239 os.dup2(fd_inp, 0) 1240 fd_out = os.open(stdout, os.O_WRONLY|os.O_CREAT) 1241 os.dup2(fd_out, 1) 1242 fd_err = os.open(stderr, os.O_WRONLY|os.O_CREAT) 1243 os.dup2(fd_err, 2)
1244 1245 # Detach task 1246 fork_daemon() 1247 1248 # Write pid 1249 file_obj = file(pid_file, 'wb') 1250 try: 1251 file_obj.write(str(os.getpid()) + '\n') 1252 finally: 1253 file_obj.close() 1254 1255 atexit.register(lambda : os.unlink(pid_file))
1256 1257
1258 - def _sleep(self, ref_time):
1259 # Only consider sleeping if read_freq is > 0 1260 if self._read_freq > 0: 1261 cur_time = time.time() 1262 sleep_amount = self._read_freq - (cur_time - ref_time) 1263 if sleep_amount > 0: 1264 log.debug('Now sleeping %d seconds', sleep_amount) 1265 time.sleep(sleep_amount)
1266 1267
1268 - def loop(self, callback=None, daemonize=False, **args):
1269 """ 1270 Events are read only once time every min(read_freq, timeout) 1271 seconds at best and only if the size to read is >= threshold. 1272 1273 @param callback: Functor called after each event processing. Expects 1274 to receive notifier object (self) as first parameter. 1275 @type callback: callable 1276 @param daemonize: This thread is daemonized if set to True. 1277 @type daemonize: boolean 1278 """ 1279 if daemonize: 1280 self.__daemonize(**args) 1281 1282 # Read and process events forever 1283 while 1: 1284 try: 1285 self.process_events() 1286 if callback is not None: 1287 callback(self) 1288 ref_time = time.time() 1289 # check_events is blocking 1290 if self.check_events(): 1291 self._sleep(ref_time) 1292 self.read_events() 1293 except KeyboardInterrupt: 1294 # Unless sigint is caught (Control-C) 1295 log.debug('Pyinotify stops monitoring.') 1296 # Stop monitoring 1297 self.stop() 1298 break
1299
1300 - def stop(self):
1301 """ 1302 Close inotify's instance (close its file descriptor). 1303 It destroys all existing watches, pending events,... 1304 """ 1305 self._pollobj.unregister(self._fd) 1306 os.close(self._fd)
1307
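# --- Illustrative sketch (editor's addition, not part of the original -------
# source): the typical blocking workflow with Notifier.  '/tmp' and the mask
# are placeholders; rec=True also watches existing subdirectories and
# auto_add=True follows directories created afterwards.
def _example_basic_loop():
    wm = WatchManager()
    notifier = Notifier(wm, PrintAllEvents())
    wm.add_watch('/tmp', IN_CREATE | IN_DELETE | IN_CLOSE_WRITE,
                 rec=True, auto_add=True)
    notifier.loop()   # processes events until KeyboardInterrupt (Ctrl-C)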
1308 1309 -class ThreadedNotifier(threading.Thread, Notifier):
1310 """ 1311 This notifier inherits from threading.Thread for instanciating a separate 1312 thread, and also inherits from Notifier, because it is a threaded notifier. 1313 1314 Note that every functionality provided by this class is also provided 1315 through Notifier class. Moreover Notifier should be considered first because 1316 it is not threaded and could be easily daemonized. 1317 """
1318 - def __init__(self, watch_manager, default_proc_fun=ProcessEvent(), 1319 read_freq=0, threshold=0, timeout=None):
1320 """ 1321 Initialization, initialize base classes. read_freq, threshold and 1322 timeout parameters are used when looping. 1323 1324 @param watch_manager: Watch Manager. 1325 @type watch_manager: WatchManager instance 1326 @param default_proc_fun: Default processing method. 1327 @type default_proc_fun: instance of ProcessEvent 1328 @param read_freq: if read_freq == 0, events are read asap, 1329 if read_freq is > 0, this thread sleeps 1330 max(0, read_freq - timeout) seconds. 1331 @type read_freq: int 1332 @param threshold: File descriptor will be read only if the accumulated 1333 size to read becomes >= threshold. If != 0, you likely 1334 want to use it in combination with an appropriate 1335 value set for read_freq because without that you would 1336 keep looping without really reading anything and that 1337 until the amount of events to read is >= threshold. At 1338 least with read_freq you might sleep. 1339 @type threshold: int 1340 @param timeout: 1341 see http://docs.python.org/lib/poll-objects.html#poll-objects 1342 @type timeout: int 1343 """ 1344 # Init threading base class 1345 threading.Thread.__init__(self) 1346 # Stop condition 1347 self._stop_event = threading.Event() 1348 # Init Notifier base class 1349 Notifier.__init__(self, watch_manager, default_proc_fun, read_freq, 1350 threshold, timeout) 1351 # Create a new pipe used for thread termination 1352 self._pipe = os.pipe() 1353 self._pollobj.register(self._pipe[0], select.POLLIN)
1354
1355 - def stop(self):
1356 """ 1357 Stop notifier's loop. Stop notification. Join the thread. 1358 """ 1359 self._stop_event.set() 1360 os.write(self._pipe[1], 'stop') 1361 threading.Thread.join(self) 1362 Notifier.stop(self) 1363 self._pollobj.unregister(self._pipe[0]) 1364 os.close(self._pipe[0]) 1365 os.close(self._pipe[1])
1366
1367 - def loop(self):
1368 """ 1369 Thread's main loop. Don't meant to be called by user directly. 1370 Call inherited start() method instead. 1371 1372 Events are read only once time every min(read_freq, timeout) 1373 seconds at best and only if the size of events to read is >= threshold. 1374 """ 1375 # When the loop must be terminated .stop() is called, 'stop' 1376 # is written to pipe fd so poll() returns and .check_events() 1377 # returns False which make evaluate the While's stop condition 1378 # ._stop_event.isSet() wich put an end to the thread's execution. 1379 while not self._stop_event.isSet(): 1380 self.process_events() 1381 ref_time = time.time() 1382 if self.check_events(): 1383 self._sleep(ref_time) 1384 self.read_events()
1385
1386 - def run(self):
1387 """ 1388 Start thread's loop: read and process events until the method 1389 stop() is called. 1390 Never call this method directly, instead call the start() method 1391 inherited from threading.Thread, which then will call run() in 1392 its turn. 1393 """ 1394 self.loop()
1395
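# --- Illustrative sketch (editor's addition, not part of the original -------
# source): running the notifier in its own thread so the caller keeps the
# main thread.  The sleep stands in for the application's real work and
# '/tmp' is a placeholder path.
def _example_threaded_notifier():
    wm = WatchManager()
    notifier = ThreadedNotifier(wm, PrintAllEvents())
    notifier.start()                  # start() comes from threading.Thread
    wdd = wm.add_watch('/tmp', IN_CREATE | IN_DELETE)
    try:
        time.sleep(10)                # application work goes here
    finally:
        wm.rm_watch(wdd.values())
        notifier.stop()               # joins the thread and closes inotify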
1396 1397 -class AsyncNotifier(asyncore.file_dispatcher, Notifier):
1398 """ 1399 This notifier inherits from asyncore.file_dispatcher in order to be able to 1400 use pyinotify along with the asyncore framework. 1401 1402 """
1403 - def __init__(self, watch_manager, default_proc_fun=ProcessEvent(), 1404 read_freq=0, threshold=0, timeout=None, channel_map=None):
1405 """ 1406 Initializes the async notifier. The only additional parameter is 1407 'channel_map' which is the optional asyncore private map. See 1408 Notifier class for the meaning of the others parameters. 1409 1410 """ 1411 Notifier.__init__(self, watch_manager, default_proc_fun, read_freq, 1412 threshold, timeout) 1413 asyncore.file_dispatcher.__init__(self, self._fd, channel_map)
1414
1415 - def handle_read(self):
1416 """ 1417 When asyncore tells us we can read from the fd, we proceed processing 1418 events. This method can be overridden for handling a notification 1419 differently. 1420 1421 """ 1422 self.read_events() 1423 self.process_events()
1424
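# --- Illustrative sketch (editor's addition, not part of the original -------
# source): plugging pyinotify into the asyncore event loop; asyncore.loop()
# calls handle_read() above whenever the inotify descriptor is readable.
# '/tmp' is a placeholder path.
def _example_async_notifier():
    wm = WatchManager()
    notifier = AsyncNotifier(wm, PrintAllEvents())
    wm.add_watch('/tmp', ALL_EVENTS)
    try:
        asyncore.loop()
    except KeyboardInterrupt:
        notifier.stop()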
1425 1426 -class Watch:
1427 """ 1428 Represent a watch, i.e. a file or directory being watched. 1429 1430 """
1431 - def __init__(self, **keys):
1432 """ 1433 Initializations. 1434 1435 @param wd: Watch descriptor. 1436 @type wd: int 1437 @param path: Path of the file or directory being watched. 1438 @type path: str 1439 @param mask: Mask. 1440 @type mask: int 1441 @param proc_fun: Processing callable object. 1442 @type proc_fun: 1443 @param auto_add: Automatically add watches on new directories. 1444 @type auto_add: bool 1445 """ 1446 for k, v in keys.iteritems(): 1447 setattr(self, k, v) 1448 self.dir = os.path.isdir(self.path)
1449
1450 - def __repr__(self):
1451 """ 1452 @return: String representation. 1453 @rtype: str 1454 """ 1455 s = ' '.join(['%s%s%s' % (Color.field_name(attr), 1456 Color.punctuation('='), 1457 Color.field_value(getattr(self, attr))) \ 1458 for attr in self.__dict__ if not attr.startswith('_')]) 1459 1460 s = '%s%s %s %s' % (Color.punctuation('<'), 1461 Color.class_name(self.__class__.__name__), 1462 s, 1463 Color.punctuation('>')) 1464 return s
1465
1466 1467 -class ExcludeFilter:
1468 """ 1469 ExcludeFilter is an exclusion filter. 1470 """
1471 - def __init__(self, arg_lst):
1472 """ 1473 @param arg_lst: is either a list or dict of patterns: 1474 [pattern1, ..., patternn] 1475 {'filename1': (list1, listn), ...} where list1 is 1476 a list of patterns 1477 @type arg_lst: list or dict 1478 """ 1479 if isinstance(arg_lst, dict): 1480 lst = self._load_patterns(arg_lst) 1481 elif isinstance(arg_lst, list): 1482 lst = arg_lst 1483 else: 1484 raise TypeError 1485 1486 self._lregex = [] 1487 for regex in lst: 1488 self._lregex.append(re.compile(regex, re.UNICODE))
1489
1490 - def _load_patterns(self, dct):
1491 lst = [] 1492 for path, varnames in dct.iteritems(): 1493 loc = {} 1494 execfile(path, {}, loc) 1495 for varname in varnames: 1496 lst.extend(loc.get(varname, [])) 1497 return lst
1498
1499 - def _match(self, regex, path):
1500 return regex.match(path) is not None
1501
1502 - def __call__(self, path):
1503 """ 1504 @param path: Path to match against provided regexps. 1505 @type path: str 1506 @return: Return True if path has been matched and should 1507 be excluded, False otherwise. 1508 @rtype: bool 1509 """ 1510 for regex in self._lregex: 1511 if self._match(regex, path): 1512 return True 1513 return False
1514
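# --- Illustrative sketch (editor's addition, not part of the original -------
# source): excluding subtrees with ExcludeFilter.  The patterns are regular
# expressions matched against full paths; both paths below are placeholders.
def _example_exclude_filter():
    excl = ExcludeFilter(['^/var/log/apache2', r'.*/\.[^/]+$'])
    wm = WatchManager()
    notifier = Notifier(wm, PrintAllEvents())
    # The exclude_filter given to add_watch() takes precedence over the one
    # optionally passed to WatchManager.__init__().
    wm.add_watch('/var/log', ALL_EVENTS, rec=True, exclude_filter=excl)
    notifier.loop()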
1515 1516 -class WatchManagerError(Exception):
1517 """ 1518 WatchManager Exception. Raised on error encountered on watches 1519 operations. 1520 1521 """
1522 - def __init__(self, msg, wmd):
1523 """ 1524 @param msg: Exception string's description. 1525 @type msg: string 1526 @param wmd: This dictionary contains the wd assigned to paths of the 1527 same call for which watches were successfully added. 1528 @type wmd: dict 1529 """ 1530 self.wmd = wmd 1531 Exception.__init__(self, msg)
1532
1533 1534 -class WatchManager:
1535 """ 1536 Provide operations for watching files and directories. Its internal 1537 dictionary is used to reference watched items. When used inside 1538 threaded code, one must instanciate as many WatchManager instances as 1539 there are ThreadedNotifier instances. 1540 1541 """
1542 - def __init__(self, exclude_filter=lambda path: False):
1543 """ 1544 Initialization: init inotify, init watch manager dictionary. 1545 Raise OSError if initialization fails. 1546 1547 @param exclude_filter: boolean function, returns True if current 1548 path must be excluded from being watched. 1549 Convenient for providing a common exclusion 1550 filter for every call to add_watch. 1551 @type exclude_filter: bool 1552 """ 1553 self._exclude_filter = exclude_filter 1554 self._wmd = {} # watch dict key: watch descriptor, value: watch 1555 self._fd = LIBC.inotify_init() # inotify's init, file descriptor 1556 if self._fd < 0: 1557 raise OSError()
1558
1559 - def get_fd(self):
1560 """ 1561 Return assigned inotify's file descriptor. 1562 1563 @return: File descriptor. 1564 @rtype: int 1565 """ 1566 return self._fd
1567
1568 - def get_watch(self, wd):
1569 """ 1570 Get watch from provided watch descriptor wd. 1571 1572 @param wd: Watch descriptor. 1573 @type wd: int 1574 """ 1575 return self._wmd.get(wd)
1576
1577 - def del_watch(self, wd):
1578 """ 1579 Remove watch entry associated to watch descriptor wd. 1580 1581 @param wd: Watch descriptor. 1582 @type wd: int 1583 """ 1584 try: 1585 del self._wmd[wd] 1586 except KeyError, err: 1587 log.error(str(err))
1588 1589 @property
1590 - def watches(self):
1591 """ 1592 Get a reference on the internal watch manager dictionary. 1593 1594 @return: Internal watch manager dictionary. 1595 @rtype: dict 1596 """ 1597 return self._wmd
1598
1599 - def __add_watch(self, path, mask, proc_fun, auto_add):
1600 """ 1601 Add a watch on path, build a Watch object and insert it in the 1602 watch manager dictionary. Return the wd value. 1603 """ 1604 # Unicode strings are converted to byte strings, it seems to be 1605 # required because LIBC.inotify_add_watch does not work well when 1606 # it receives an ctypes.create_unicode_buffer instance as argument. 1607 # Therefore even wd are indexed with bytes string and not with 1608 # unicode paths. 1609 if isinstance(path, unicode): 1610 byte_path = path.encode(sys.getfilesystemencoding()) 1611 else: 1612 byte_path = path 1613 1614 wd_ = LIBC.inotify_add_watch(self._fd, 1615 ctypes.create_string_buffer(byte_path), 1616 mask) 1617 if wd_ < 0: 1618 return wd_ 1619 watch_ = Watch(wd=wd_, path=os.path.normpath(byte_path), mask=mask, 1620 proc_fun=proc_fun, auto_add=auto_add) 1621 self._wmd[wd_] = watch_ 1622 log.debug('New %s', watch_) 1623 return wd_
1624
1625 - def __glob(self, path, do_glob):
1626 if do_glob: 1627 return iglob(path) 1628 else: 1629 return [path]
1630
1631 - def add_watch(self, path, mask, proc_fun=None, rec=False, 1632 auto_add=False, do_glob=False, quiet=True, 1633 exclude_filter=None):
1634 """ 1635 Add watch(s) on the provided |path|(s) with associated |mask| flag 1636 value and optionally with a processing |proc_fun| function and 1637 recursive flag |rec| set to True. 1638 Ideally |path| components should not be unicode objects. Note that 1639 although unicode paths are accepted there are converted to byte 1640 strings before a watch is put on that path. The encoding used for 1641 converting the unicode object is given by sys.getfilesystemencoding(). 1642 1643 @param path: Path to watch, the path can either be a file or a 1644 directory. Also accepts a sequence (list) of paths. 1645 @type path: string or list of strings 1646 @param mask: Bitmask of events. 1647 @type mask: int 1648 @param proc_fun: Processing object. 1649 @type proc_fun: function or ProcessEvent instance or instance of 1650 one of its subclasses or callable object. 1651 @param rec: Recursively add watches from path on all its 1652 subdirectories, set to False by default (doesn't 1653 follows symlinks in any case). 1654 @type rec: bool 1655 @param auto_add: Automatically add watches on newly created 1656 directories in watched parent |path| directory. 1657 @type auto_add: bool 1658 @param do_glob: Do globbing on pathname (see standard globbing 1659 module for more informations). 1660 @type do_glob: bool 1661 @param quiet: if False raises a WatchManagerError exception on 1662 error. See example not_quiet.py. 1663 @type quiet: bool 1664 @param exclude_filter: boolean function, returns True if current 1665 path must be excluded from being watched. 1666 Has precedence on exclude_filter defined 1667 into __init__. 1668 @type exclude_filter: bool 1669 @return: dict of paths associated to watch descriptors. A wd value 1670 is positive if the watch was added sucessfully, 1671 otherwise the value is negative. If the path was invalid 1672 it is not included into this returned dictionary. 1673 @rtype: dict of {str: int} 1674 """ 1675 ret_ = {} # return {path: wd, ...} 1676 1677 if exclude_filter is None: 1678 exclude_filter = self._exclude_filter 1679 1680 # normalize args as list elements 1681 for npath in self.__format_param(path): 1682 # unix pathname pattern expansion 1683 for apath in self.__glob(npath, do_glob): 1684 # recursively list subdirs according to rec param 1685 for rpath in self.__walk_rec(apath, rec): 1686 if not exclude_filter(rpath): 1687 wd = ret_[rpath] = self.__add_watch(rpath, mask, 1688 proc_fun, 1689 auto_add) 1690 if wd < 0: 1691 err = 'add_watch: cannot watch %s (WD=%d)' 1692 err = err % (rpath, wd) 1693 if quiet: 1694 log.error(err) 1695 else: 1696 raise WatchManagerError(err, ret_) 1697 else: 1698 # Let's say -2 means 'explicitely excluded 1699 # from watching'. 1700 ret_[rpath] = -2 1701 return ret_
1702
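    # Usage example (editor's illustrative addition, not part of the original
    # source): interpreting the dictionary returned by add_watch().  The glob
    # pattern is only an example; with do_glob=True it is expanded by the
    # iglob() helper defined earlier in this module.
    #
    #   wm = WatchManager()
    #   wdd = wm.add_watch('/etc/rsyslog.*', IN_MODIFY, do_glob=True)
    #   for path, wd in wdd.iteritems():
    #       if wd < 0:
    #           # Negative means failure; -2 means the path was rejected by
    #           # an exclude filter.
    #           print 'could not watch', path
    #       else:
    #           print 'watching', path, 'with wd', wd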
1703 - def __get_sub_rec(self, lpath):
1704 """ 1705 Get every wd from self._wmd if its path is under the path of 1706 one (at least) of those in lpath. Doesn't follow symlinks. 1707 1708 @param lpath: list of watch descriptor 1709 @type lpath: list of int 1710 @return: list of watch descriptor 1711 @rtype: list of int 1712 """ 1713 for d in lpath: 1714 root = self.get_path(d) 1715 if root: 1716 # always keep root 1717 yield d 1718 else: 1719 # if invalid 1720 continue 1721 1722 # nothing else to expect 1723 if not os.path.isdir(root): 1724 continue 1725 1726 # normalization 1727 root = os.path.normpath(root) 1728 # recursion 1729 lend = len(root) 1730 for iwd in self._wmd.items(): 1731 cur = iwd[1].path 1732 pref = os.path.commonprefix([root, cur]) 1733 if root == os.sep or (len(pref) == lend and \ 1734 len(cur) > lend and \ 1735 cur[lend] == os.sep): 1736 yield iwd[1].wd
1737
1738 - def update_watch(self, wd, mask=None, proc_fun=None, rec=False, 1739 auto_add=False, quiet=True):
1740 """ 1741 Update existing watch descriptors |wd|. The |mask| value, the 1742 processing object |proc_fun|, the recursive param |rec| and the 1743 |auto_add| and |quiet| flags can all be updated. 1744 1745 @param wd: Watch Descriptor to update. Also accepts a list of 1746 watch descriptors. 1747 @type wd: int or list of int 1748 @param mask: Optional new bitmask of events. 1749 @type mask: int 1750 @param proc_fun: Optional new processing function. 1751 @type proc_fun: function or ProcessEvent instance or instance of 1752 one of its subclasses or callable object. 1753 @param rec: Optionally adds watches recursively on all 1754 subdirectories contained into |wd| directory. 1755 @type rec: bool 1756 @param auto_add: Automatically adds watches on newly created 1757 directories in the watch's path corresponding to 1758 |wd|. 1759 @type auto_add: bool 1760 @param quiet: If False raises a WatchManagerError exception on 1761 error. See example not_quiet.py 1762 @type quiet: bool 1763 @return: dict of watch descriptors associated to booleans values. 1764 True if the corresponding wd has been successfully 1765 updated, False otherwise. 1766 @rtype: dict of {int: bool} 1767 """ 1768 lwd = self.__format_param(wd) 1769 if rec: 1770 lwd = self.__get_sub_rec(lwd) 1771 1772 ret_ = {} # return {wd: bool, ...} 1773 for awd in lwd: 1774 apath = self.get_path(awd) 1775 if not apath or awd < 0: 1776 err = 'update_watch: invalid WD=%d' % awd 1777 if quiet: 1778 log.error(err) 1779 continue 1780 raise WatchManagerError(err, ret_) 1781 1782 if mask: 1783 addw = LIBC.inotify_add_watch 1784 wd_ = addw(self._fd, ctypes.create_string_buffer(apath), mask) 1785 if wd_ < 0: 1786 ret_[awd] = False 1787 err = 'update_watch: cannot update WD=%d (%s)' % (wd_, 1788 apath) 1789 if quiet: 1790 log.error(err) 1791 continue 1792 raise WatchManagerError(err, ret_) 1793 1794 assert(awd == wd_) 1795 1796 if proc_fun or auto_add: 1797 watch_ = self._wmd[awd] 1798 1799 if proc_fun: 1800 watch_.proc_fun = proc_fun 1801 1802 if auto_add: 1803 watch_.proc_fun = auto_add 1804 1805 ret_[awd] = True 1806 log.debug('Updated watch - %s', self._wmd[awd]) 1807 return ret_
1808
1809 - def __format_param(self, param):
1810 """ 1811 @param param: Parameter. 1812 @type param: string or int 1813 @return: wrapped param. 1814 @rtype: list of type(param) 1815 """ 1816 if isinstance(param, list): 1817 for p_ in param: 1818 yield p_ 1819 else: 1820 yield param
1821
1822 - def get_wd(self, path):
1823 """ 1824 Returns the watch descriptor associated with path. This method 1825 has a prohibitive cost; always prefer to keep the WD 1826 returned by add_watch(). If the path is unknown it returns None. 1827 1828 @param path: Path. 1829 @type path: str 1830 @return: WD or None. 1831 @rtype: int or None 1832 """ 1833 path = os.path.normpath(path) 1834 for iwd in self._wmd.iteritems(): 1835 if iwd[1].path == path: 1836 return iwd[0] 1837 log.debug('get_wd: unknown path %s', path)
1838
1839 - def get_path(self, wd):
1840 """ 1841 Returns the path associated with WD; if WD is unknown it returns None. 1842 1843 @param wd: Watch descriptor. 1844 @type wd: int 1845 @return: Path or None. 1846 @rtype: string or None 1847 """ 1848 watch_ = self._wmd.get(wd) 1849 if watch_: 1850 return watch_.path 1851 log.debug('get_path: unknown WD %d', wd)
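# Example (illustrative sketch, hypothetical path): mapping back and forth
# between paths and watch descriptors.
#
#   import pyinotify
#
#   wm = pyinotify.WatchManager()
#   wm.add_watch('/tmp/watched', pyinotify.ALL_EVENTS)
#   wd = wm.get_wd('/tmp/watched')    # linear scan over all watches: slow
#   if wd is not None:
#       print(wm.get_path(wd))        # prints '/tmp/watched'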
1852
1853 - def __walk_rec(self, top, rec):
1854 """ 1855 Yields each subdirectory of top; doesn't follow symlinks. 1856 If rec is False, only yields top. 1857 1858 @param top: root directory. 1859 @type top: string 1860 @param rec: recursive flag. 1861 @type rec: bool 1862 @return: path of one subdirectory. 1863 @rtype: string 1864 """ 1865 if not rec or os.path.islink(top) or not os.path.isdir(top): 1866 yield top 1867 else: 1868 for root, dirs, files in os.walk(top): 1869 yield root
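# Illustrative sketch of the traversal performed above, as standalone code;
# the path is a hypothetical placeholder.
#
#   import os
#   top, rec = '/tmp/watched', True
#   if not rec or os.path.islink(top) or not os.path.isdir(top):
#       print(top)
#   else:
#       for root, dirs, files in os.walk(top):
#           print(root)   # os.walk() does not follow symlinks by default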
1870
1871 - def rm_watch(self, wd, rec=False, quiet=True):
1872 """ 1873 Removes watch(es). 1874 1875 @param wd: Watch Descriptor of the file or directory to unwatch. 1876 Also accepts a list of WDs. 1877 @type wd: int or list of int. 1878 @param rec: Recursively removes watches on all already watched 1879 subdirectories and subfiles. 1880 @type rec: bool 1881 @param quiet: If False raises a WatchManagerError exception on 1882 error. See example not_quiet.py 1883 @type quiet: bool 1884 @return: dict of watch descriptors associated with boolean values. 1885 True if the corresponding wd has been successfully 1886 removed, False otherwise. 1887 @rtype: dict of {int: bool} 1888 """ 1889 lwd = self.__format_param(wd) 1890 if rec: 1891 lwd = self.__get_sub_rec(lwd) 1892 1893 ret_ = {} # return {wd: bool, ...} 1894 for awd in lwd: 1895 # remove watch 1896 wd_ = LIBC.inotify_rm_watch(self._fd, awd) 1897 if wd_ < 0: 1898 ret_[awd] = False 1899 err = 'rm_watch: cannot remove WD=%d' % awd 1900 if quiet: 1901 log.error(err) 1902 continue 1903 raise WatchManagerError(err, ret_) 1904 1905 ret_[awd] = True 1906 log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd)) 1907 return ret_
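# Example (illustrative sketch, hypothetical path): removing a whole subtree
# of watches in one call.
#
#   import pyinotify
#
#   wm = pyinotify.WatchManager()
#   wdd = wm.add_watch('/tmp/watched', pyinotify.ALL_EVENTS, rec=True)
#   removed = wm.rm_watch(wdd['/tmp/watched'], rec=True)
#   # removed maps each wd to True if it was successfully unwatched.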
1908 1909
1910 - def watch_transient_file(self, filename, mask, proc_class):
1911 """ 1912 Watch a transient file, which will be created and deleted frequently 1913 over time (e.g. pid file). 1914 1915 @attention: Currently, when this function is used it is not 1916 possible to correctly watch events triggered in the same 1917 base directory as the one containing the watched 1918 transient file. For instance, it would be wrong to make these 1919 two successive calls: wm.watch_transient_file('/var/run/foo.pid', ...) 1920 and wm.add_watch('/var/run/', ...) 1921 1922 @param filename: Filename. 1923 @type filename: string 1924 @param mask: Bitmask of events, should contain IN_CREATE and IN_DELETE. 1925 @type mask: int 1926 @param proc_class: ProcessEvent class (or one of its subclasses); its 1927 __init__ must accept a ProcessEvent instance as 1928 argument, see the transient_file.py example for more 1929 details. 1930 @type proc_class: ProcessEvent class or one of its subclasses. 1931 @return: Same as add_watch(). 1932 @rtype: Same as add_watch(). 1933 """ 1934 dirname = os.path.dirname(filename) 1935 if dirname == '': 1936 return {} # Maintains coherence with add_watch() 1937 basename = os.path.basename(filename) 1938 # Assuming we are watching at least for IN_CREATE and IN_DELETE 1939 mask |= IN_CREATE | IN_DELETE 1940 1941 def cmp_name(event): 1942 return basename == event.name
1943 return self.add_watch(dirname, mask, 1944 proc_fun=proc_class(ChainIfTrue(func=cmp_name)), 1945 rec=False, 1946 auto_add=False, do_glob=False)
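# Example (illustrative sketch, loosely modelled on the transient_file.py
# example mentioned above; '/var/run/foo.pid' and ProcessPidFile are
# hypothetical). Note that the class itself is passed, not an instance:
# watch_transient_file() instantiates it with a ChainIfTrue filter that
# matches the file's basename.
#
#   import pyinotify
#
#   class ProcessPidFile(pyinotify.ProcessEvent):
#       def process_IN_CREATE(self, event):
#           print('pid file created: %s' % event.pathname)
#       def process_IN_DELETE(self, event):
#           print('pid file removed: %s' % event.pathname)
#
#   wm = pyinotify.WatchManager()
#   notifier = pyinotify.Notifier(wm)
#   wm.watch_transient_file('/var/run/foo.pid',
#                           pyinotify.IN_CREATE | pyinotify.IN_DELETE,
#                           ProcessPidFile)
#   notifier.loop()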
1947
1948 1949 -class Color:
1950 """ 1951 Internal class. Provide fancy colors used by string representations. 1952 """ 1953 normal = "\033[0m" 1954 black = "\033[30m" 1955 red = "\033[31m" 1956 green = "\033[32m" 1957 yellow = "\033[33m" 1958 blue = "\033[34m" 1959 purple = "\033[35m" 1960 cyan = "\033[36m" 1961 bold = "\033[1m" 1962 uline = "\033[4m" 1963 blink = "\033[5m" 1964 invert = "\033[7m" 1965 1966 @staticmethod
1967 - def punctuation(s):
1968 """Punctuation color.""" 1969 return Color.normal + s + Color.normal
1970 1971 @staticmethod
1972 - def field_value(s):
1973 """Field value color.""" 1974 if not isinstance(s, basestring): 1975 s = str(s) 1976 return Color.purple + s + Color.normal
1977 1978 @staticmethod
1979 - def field_name(s):
1980 """Field name color.""" 1981 return Color.blue + s + Color.normal
1982 1983 @staticmethod
1984 - def class_name(s):
1985 """Class name color.""" 1986 return Color.red + Color.bold + s + Color.normal
1987 1988 @staticmethod
1989 - def simple(s, color):
1990 if not isinstance(s, basestring): 1991 s = str(s) 1992 try: 1993 color_attr = getattr(Color, color) 1994 except AttributeError: 1995 return s 1996 return color_attr + s + Color.normal
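# Illustrative sketch: the helpers above simply wrap a value in ANSI escape
# sequences, falling back to plain text for unknown color names.
#
#   print(Color.class_name('Event') + Color.punctuation(': ') +
#         Color.field_value(42))
#   print(Color.simple('ok', 'green'))
#   print(Color.simple('plain', 'no-such-color'))   # returned unchanged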
1997
1998 1999 -def compatibility_mode():
2000 """ 2001 Use this function to turn on the compatibility mode. The compatibility 2002 mode is used to improve compatibility with Pyinotify 0.7.1 (or older) 2003 programs. The compatibility mode provides additional variables 'is_dir', 2004 'event_name', 'EventsCodes.IN_*' and 'EventsCodes.ALL_EVENTS' as 2005 Pyinotify 0.7.1 provided. Do not call this function from new programs, 2006 especially if they are developed for Pyinotify >= 0.8.x. 2007 """ 2008 setattr(EventsCodes, 'ALL_EVENTS', ALL_EVENTS) 2009 for evname in globals(): 2010 if evname.startswith('IN_'): 2011 setattr(EventsCodes, evname, globals()[evname]) 2012 global COMPATIBILITY_MODE 2013 COMPATIBILITY_MODE = True
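# Example (illustrative sketch): enabling the compatibility aliases for
# legacy code written against Pyinotify 0.7.1.
#
#   import pyinotify
#
#   pyinotify.compatibility_mode()
#   # The 0.7.1-style attributes are now available again on EventsCodes:
#   mask = pyinotify.EventsCodes.IN_CREATE | pyinotify.EventsCodes.IN_DELETE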
2014
2015 2016 -def command_line():
2017 """ 2018 By default the watched path is '/tmp' and all types of events are 2019 monitored. Event monitoring runs forever; type ^C to stop it. 2020 """ 2021 from optparse import OptionParser 2022 2023 usage = "usage: %prog [options] [path1] [path2] [pathn]" 2024 2025 parser = OptionParser(usage=usage) 2026 parser.add_option("-v", "--verbose", action="store_true", 2027 dest="verbose", help="Verbose mode") 2028 parser.add_option("-r", "--recursive", action="store_true", 2029 dest="recursive", 2030 help="Add watches recursively on paths") 2031 parser.add_option("-a", "--auto_add", action="store_true", 2032 dest="auto_add", 2033 help="Automatically add watches on new directories") 2034 parser.add_option("-e", "--events-list", metavar="EVENT[,...]", 2035 dest="events_list", 2036 help=("A comma-separated list of events to watch for - " 2037 "see the documentation for valid options (defaults" 2038 " to everything)")) 2039 parser.add_option("-s", "--stats", action="store_true", 2040 dest="stats", 2041 help="Display dummy statistics") 2042 2043 (options, args) = parser.parse_args() 2044 2045 if options.verbose: 2046 log.setLevel(10) 2047 2048 if len(args) < 1: 2049 path = '/tmp' # default watched path 2050 else: 2051 path = args 2052 2053 # watch manager instance 2054 wm = WatchManager() 2055 # notifier instance and init 2056 if options.stats: 2057 notifier = Notifier(wm, default_proc_fun=Stats(), read_freq=5) 2058 else: 2059 notifier = Notifier(wm, default_proc_fun=PrintAllEvents()) 2060 2061 # What mask to apply 2062 mask = 0 2063 if options.events_list: 2064 events_list = options.events_list.split(',') 2065 for ev in events_list: 2066 evcode = EventsCodes.ALL_FLAGS.get(ev, 0) 2067 if evcode: 2068 mask |= evcode 2069 else: 2070 parser.error("The event '%s' specified with option -e" 2071 " is not valid" % ev) 2072 else: 2073 mask = ALL_EVENTS 2074 2075 # stats 2076 cb_fun = None 2077 if options.stats: 2078 def cb(s): 2079 print('%s\n%s\n' % (repr(s.proc_fun()), 2080 s.proc_fun()))
2081 cb_fun = cb 2082 2083 log.debug('Start monitoring %s (press ^C to halt pyinotify)' % path) 2084 2085 wm.add_watch(path, mask, rec=options.recursive, auto_add=options.auto_add) 2086 # Loop forever (until the SIGINT signal is caught) 2087 notifier.loop(callback=cb_fun) 2088 2089 2090 if __name__ == '__main__': 2091 command_line() 2092
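# Illustrative command-line invocations (paths are hypothetical):
#
#   # Watch /tmp (the default) for every event type, verbosely:
#   #   python pyinotify.py -v
#   # Watch two trees recursively, auto-adding new directories, but only
#   # for create/delete events:
#   #   python pyinotify.py -r -a -e IN_CREATE,IN_DELETE /var/log /home/user
#   # Print periodic dummy statistics instead of raw events:
#   #   python pyinotify.py -s /tmp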