
Source Code for Module mvpa.measures.base

#emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
#   See COPYING file distributed along with the PyMVPA package for the
#   copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Base class for data measures: algorithms that quantify properties of
datasets.

Besides the `DatasetMeasure` base class this module also provides the
(abstract) `FeaturewiseDatasetMeasure` class. The difference between a general
measure and the output of the `FeaturewiseDatasetMeasure` is that the latter
returns a 1d map (one value per feature in the dataset). In contrast, there
are no restrictions on the returned value of `DatasetMeasure` except that it
has to be in some iterable container.

"""
 20   
 21  __docformat__ = 'restructuredtext' 
 22   
 23  import numpy as N 
 24  import mvpa.support.copy as copy 
 25   
 26  from mvpa.misc.state import StateVariable, Stateful 
 27  from mvpa.misc.args import group_kwargs 
 28  from mvpa.misc.transformers import FirstAxisMean, SecondAxisSumOfAbs 
 29  from mvpa.base.dochelpers import enhancedDocString 
 30  from mvpa.base import externals 
 31  from mvpa.clfs.stats import autoNullDist 
 32   
 33  if __debug__: 
 34      from mvpa.base import debug 
 35   
 36   
class DatasetMeasure(Stateful):
    """A measure computed from a `Dataset`.

    All dataset measures support arbitrary transformation of the measure
    after it has been computed. Transformations are done by processing the
    measure with a functor that is specified via the `transformer` keyword
    argument of the constructor. Upon request, the raw measure (before
    transformations are applied) is stored in the `raw_result` state variable.

    Additionally, all dataset measures support the estimation of the
    probabilit(y,ies) of a measure under some distribution. Typically this
    will be the NULL distribution (no signal), which can be estimated with
    permutation tests. If a distribution estimator instance is passed to the
    `null_dist` keyword argument of the constructor, the respective
    probabilities are automatically computed and stored in the `null_prob`
    state variable.

    .. note::
      For developers: All subclasses shall get all necessary parameters via
      their constructor, so it is possible to get the same type of measure for
      multiple datasets by passing them to the __call__() method successively.
    """

    raw_result = StateVariable(enabled=False,
        doc="Computed results before applying any " +
            "transformation algorithm")
    null_prob = StateVariable(enabled=True)
    """Stores the probability of a measure under the NULL hypothesis"""
    null_t = StateVariable(enabled=False)
    """Stores the t-score corresponding to null_prob under assumption
    of Normal distribution"""

    def __init__(self, transformer=None, null_dist=None, **kwargs):
        """Does nothing special.

        :Parameters:
          transformer: Functor
            This functor is called in `__call__()` to perform a final
            processing step on the dataset measure before it is returned. If
            None, nothing is called.
          null_dist: instance of distribution estimator
            The estimated distribution is used to assign a probability for a
            certain value of the computed measure.
        """
        Stateful.__init__(self, **kwargs)

        self.__transformer = transformer
        """Functor to be called in return statement of all subclass __call__()
        methods."""
        null_dist_ = autoNullDist(null_dist)
        if __debug__:
            debug('SA', 'Assigning null_dist %s whenever original given was %s'
                  % (null_dist_, null_dist))
        self.__null_dist = null_dist_


    __doc__ = enhancedDocString('DatasetMeasure', locals(), Stateful)

    def __call__(self, dataset):
        """Compute measure on a given `Dataset`.

        Each implementation has to handle a single argument: the source
        dataset.

        Returns the computed measure in some iterable (list-like) container,
        applying the transformer if one is defined.
        """
        result = self._call(dataset)
        result = self._postcall(dataset, result)
        return result

    def _call(self, dataset):
        """Actually compute measure on a given `Dataset`.

        Each implementation has to handle a single argument: the source
        dataset.

        Returns the computed measure in some iterable (list-like) container.
        """
        raise NotImplementedError

    def _postcall(self, dataset, result):
        """Some postprocessing on the result
        """
        self.raw_result = result
        if not self.__transformer is None:
            if __debug__:
                debug("SA_", "Applying transformer %s" % self.__transformer)
            result = self.__transformer(result)

        # estimate the NULL distribution when functor is given
        if not self.__null_dist is None:
            if __debug__:
                debug("SA_", "Estimating NULL distribution using %s"
                      % self.__null_dist)

            # we need a matching datameasure instance, but we have to disable
            # the estimation of the null distribution in that child to prevent
            # infinite looping.
            measure = copy.copy(self)
            measure.__null_dist = None
            self.__null_dist.fit(measure, dataset)

            if self.states.isEnabled('null_t'):
                # get probability under NULL hyp, but also request
                # whether it belongs to the right tail
                null_prob, null_right_tail = \
                           self.__null_dist.p(result, return_tails=True)
                self.null_prob = null_prob

                externals.exists('scipy', raiseException=True)
                from scipy.stats import norm

                # TODO: following logic should appear in NullDist,
                #       not here
                tail = self.null_dist.tail
                if tail == 'left':
                    acdf = N.abs(null_prob)
                elif tail == 'right':
                    acdf = 1.0 - N.abs(null_prob)
                elif tail in ['any', 'both']:
                    acdf = 1.0 - N.clip(N.abs(null_prob), 0, 0.5)
                else:
                    raise RuntimeError, 'Unhandled tail %s' % tail
                # We need to clip to avoid non-informative inf's ;-)
                # that happen due to lack of precision in the mantissa,
                # which is 52 bits in a double. We could clip values
                # around 0 as low as 1e-100 (corresponds to z~=21),
                # but for consistency let's clip at 1e-16, which leads
                # to a distinguishable value around p=1 and max z=8.2.
                # Should be a sufficient range of z-values ;-)
                clip = 1e-16
                null_t = norm.ppf(N.clip(acdf, clip, 1.0 - clip))
                null_t[~null_right_tail] *= -1.0 # revert sign for negatives
                self.null_t = null_t             # store
            else:
                # get probability of result under NULL hypothesis if available
                # and don't request tail information
                self.null_prob = self.__null_dist.p(result)

        return result

    def __repr__(self, prefixes=[]):
        """String representation of DatasetMeasure

        Includes only arguments which differ from default ones
        """
        prefixes = prefixes[:]
        if self.__transformer is not None:
            prefixes.append("transformer=%s" % self.__transformer)
        if self.__null_dist is not None:
            prefixes.append("null_dist=%s" % self.__null_dist)
        return super(DatasetMeasure, self).__repr__(prefixes=prefixes)

    @property
    def null_dist(self):
        """Return Null Distribution estimator"""
        return self.__null_dist

    @property
    def transformer(self):
        """Return transformer"""
        return self.__transformer

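
# --- Illustrative usage sketch (not part of the original module) -----------
# A minimal sketch of how a concrete measure could be built on top of
# `DatasetMeasure`: `_call()` returns per-feature variances and the optional
# `transformer` is applied automatically in `_postcall()`.  The dataset layout
# (a `samples` array of shape nsamples x nfeatures) follows the PyMVPA
# `Dataset` convention; the class and function names below are hypothetical
# example code, not part of PyMVPA.  A null distribution estimator (e.g.
# `mvpa.clfs.stats.MCNullDist`) could be passed via `null_dist` in the same
# fashion.
def _example_variance_measure():
    class VarianceMeasure(DatasetMeasure):
        """Toy measure: variance of each feature across samples."""
        def _call(self, dataset):
            return N.var(dataset.samples, axis=0)

    # the transformer functor negates the computed measure before it is
    # returned by __call__()
    return VarianceMeasure(transformer=lambda x: -x)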

class FeaturewiseDatasetMeasure(DatasetMeasure):
    """A per-feature-measure computed from a `Dataset` (base class).

    Should behave like a DatasetMeasure.
    """

    base_sensitivities = StateVariable(enabled=False,
        doc="Stores basic sensitivities if the sensitivity " +
            "relies on combining multiple ones")

    # XXX should we maybe default to combiner=None to avoid
    #     unexpected results? Also rethink if we need combiner here at
    #     all... Maybe combiners should be 'adjoint' with transformer
    # YYY in comparison to CombinedSensitivityAnalyzer here the default
    #     value for combiner is worse than anywhere. From now on,
    #     default combiners should be provided "in place", ie
    #     in SMLR it makes sense to have SecondAxisMaxOfAbs,
    #     in SVM (pair-wise) only for not-binary should be
    #     SecondAxisSumOfAbs, though could be Max as well... uff
    # YOH: started to do so, but still have issues... thus
    #      reverting back for now
    def __init__(self, combiner=SecondAxisSumOfAbs, **kwargs): # SecondAxisSumOfAbs
        """Initialize

        :Parameters:
          combiner : Functor
            The combiner is only applied if the computed featurewise dataset
            measure is more than one-dimensional. This is different from a
            `transformer`, which is always applied. By default, the sum of
            absolute values along the second axis is computed.
        """
        DatasetMeasure.__init__(self, **kwargs)

        self.__combiner = combiner

    def __repr__(self, prefixes=None):
        if prefixes is None:
            prefixes = []
        if self.__combiner != SecondAxisSumOfAbs:
            prefixes.append("combiner=%s" % self.__combiner)
        return \
            super(FeaturewiseDatasetMeasure, self).__repr__(prefixes=prefixes)

    def _call(self, dataset):
        """Computes a per-feature-measure on a given `Dataset`.

        Behaves like a `DatasetMeasure`, but computes and returns a 1d ndarray
        with one value per feature.
        """
        raise NotImplementedError

    def _postcall(self, dataset, result):
        """Adjusts per-feature-measure for the computed `result`


        TODO: overlaps in what it does heavily with
         CombinedSensitivityAnalyzer, thus this one might make use of
         CombinedSensitivityAnalyzer yoh thinks, and here
         base_sensitivities doesn't sound appropriate.
         MH: There is indeed some overlap, but also significant differences.
             This one operates on a single sensana and combines over second
             axis, CombinedFeaturewiseDatasetMeasure uses first axis.
             Additionally, 'Sensitivity' base class is
             FeaturewiseDatasetMeasures which would have to be changed to
             CombinedFeaturewiseDatasetMeasure to deal with stuff like
             SMLRWeights that return multiple sensitivity values by default.
             Not sure if unification of both (and/or removal of functionality
             here) does not lead to an overall more complicated situation,
             without any real gain -- after all this one works ;-)
        """
        result_sq = result.squeeze()
        if len(result_sq.shape) > 1:
            n_base = result.shape[1]
            """Number of base sensitivities"""
            if self.states.isEnabled('base_sensitivities'):
                b_sensitivities = []
                if not self.states.isKnown('biases'):
                    biases = None
                else:
                    biases = self.biases
                    if len(self.biases) != n_base:
                        raise ValueError, \
                          "Number of biases %d is " % len(self.biases) \
                          + "different from number of base sensitivities " \
                          + "%d" % n_base
                for i in xrange(n_base):
                    if not biases is None:
                        bias = biases[i]
                    else:
                        bias = None
                    b_sensitivities.append(StaticDatasetMeasure(
                        measure = result[:,i],
                        bias = bias))
                self.base_sensitivities = b_sensitivities

            # After we stored each sensitivity separately,
            # we can apply the combiner
            if self.__combiner is not None:
                result = self.__combiner(result)
        else:
            # remove bogus dimensions
            # XXX we might need to come up with smth better. May be some naive
            #     combiner? :-)
            result = result_sq

        # call base class postcall
        result = DatasetMeasure._postcall(self, dataset, result)

        return result

    @property
    def combiner(self):
        """Return combiner"""
        return self.__combiner

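
# --- Illustrative usage sketch (not part of the original module) -----------
# If a featurewise measure returns a 2d array of shape nfeatures x k,
# `_postcall()` applies the combiner along the second axis; the default
# SecondAxisSumOfAbs collapses it to one value per feature.  The toy subclass
# and function name below are hypothetical example code.
def _example_featurewise_combiner():
    class TwoColumnMeasure(FeaturewiseDatasetMeasure):
        """Toy measure returning two values per feature."""
        def _call(self, dataset):
            samples = N.asarray(dataset.samples)
            # per-feature mean and standard deviation, stacked side by side
            return N.vstack((samples.mean(axis=0), samples.std(axis=0))).T

    # with the default combiner this yields sum(|mean|, |std|) per feature;
    # passing combiner=None would return the 2d result unchanged
    return TwoColumnMeasure()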

class StaticDatasetMeasure(DatasetMeasure):
    """A static (assigned) sensitivity measure.

    Since the implementation is generic it might be per feature or
    per whole dataset.
    """

    def __init__(self, measure=None, bias=None, *args, **kwargs):
        """Initialize.

        :Parameters:
          measure
            actual sensitivity to be returned
          bias
            optionally available bias
        """
        DatasetMeasure.__init__(self, *args, **kwargs)
        if measure is None:
            raise ValueError, "Sensitivity measure has to be provided"
        self.__measure = measure
        self.__bias = bias

    def _call(self, dataset):
        """Returns the assigned sensitivity
        """
        return self.__measure

    #XXX Might need to move into StateVariable?
    bias = property(fget=lambda self:self.__bias)
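
# --- Illustrative usage sketch (not part of the original module) -----------
# StaticDatasetMeasure simply hands back a precomputed sensitivity vector and
# ignores the dataset passed to __call__().  The values and the function name
# below are arbitrary example code.
def _example_static_measure():
    precomputed = N.array([0.1, -0.5, 2.0])
    measure = StaticDatasetMeasure(measure=precomputed, bias=0.0)
    # _call() ignores its dataset argument, so None suffices here
    return measure(None), measure.bias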


#
# Flavored implementations of FeaturewiseDatasetMeasures

class Sensitivity(FeaturewiseDatasetMeasure):

    _LEGAL_CLFS = []
    """If a Sensitivity is classifier-specific, the classes of legal
    classifiers should be listed here.
    """

    def __init__(self, clf, force_training=True, **kwargs):
        """Initialize the analyzer with the classifier it shall use.

        :Parameters:
          clf : :class:`Classifier`
            classifier to use.
          force_training : Bool
            Flag whether the classifier should be (re)trained on the dataset
            passed to __call__() even if it has already been trained.
        """
        FeaturewiseDatasetMeasure.__init__(self, **kwargs)

        _LEGAL_CLFS = self._LEGAL_CLFS
        if len(_LEGAL_CLFS) > 0:
            found = False
            for clf_class in _LEGAL_CLFS:
                if isinstance(clf, clf_class):
                    found = True
                    break
            if not found:
                raise ValueError, \
                  "Classifier %s has to be of allowed class (%s), but is %s" \
                  % (clf, _LEGAL_CLFS, `type(clf)`)

        self.__clf = clf
        """Classifier used to compute sensitivity"""

        self._force_training = force_training
        """Whether to force (re)training of the classifier"""

    def __repr__(self, prefixes=None):
        if prefixes is None:
            prefixes = []
        prefixes.append("clf=%s" % repr(self.clf))
        if not self._force_training:
            prefixes.append("force_training=%s" % self._force_training)
        return super(Sensitivity, self).__repr__(prefixes=prefixes)

    def __call__(self, dataset=None):
        """Train the classifier on `dataset` and then compute the actual
        sensitivity.

        If the classifier is already trained it is possible to extract the
        sensitivities without passing a dataset.
        """
        # local bindings
        clf = self.__clf
        if not clf.trained or self._force_training:
            if dataset is None:
                raise ValueError, \
                      "Training classifier to compute sensitivities requires " \
                      "a dataset."
            if __debug__:
                debug("SA", "Training classifier %s %s" %
                      (`clf`,
                       {False: "since it wasn't yet trained",
                        True: "although it was trained previously"}
                       [clf.trained]))
            clf.train(dataset)

        return FeaturewiseDatasetMeasure.__call__(self, dataset)

    def _setClassifier(self, clf):
        self.__clf = clf


    @property
    def feature_ids(self):
        """Return feature_ids used by the underlying classifier
        """
        return self.__clf._getFeatureIds()


    clf = property(fget=lambda self:self.__clf,
                   fset=_setClassifier)

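
# --- Illustrative usage sketch (not part of the original module) -----------
# Concrete Sensitivity subclasses are normally obtained from a classifier via
# its getSensitivityAnalyzer() factory rather than instantiated directly.
# The SMLR import below is an assumption about this code base; the returned
# analyzer trains the classifier on the dataset on first use.
def _example_classifier_sensitivity(dataset):
    from mvpa.clfs.smlr import SMLR

    clf = SMLR()
    sensana = clf.getSensitivityAnalyzer()
    # one sensitivity value per feature (after the default combiner)
    return sensana(dataset)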

class CombinedFeaturewiseDatasetMeasure(FeaturewiseDatasetMeasure):
    """A set of sensitivity analyzers to be merged into a single output"""

    sensitivities = StateVariable(enabled=False,
        doc="Sensitivities produced by each analyzer")

    # XXX think again about combiners... now we have it in here as
    #     well as in the parent -- FeaturewiseDatasetMeasure
    # YYY because we don't use parent's _call. Needs RF
    def __init__(self, analyzers=None,  # XXX should become actually 'measures'
                 combiner=None,         # FirstAxisMean,
                 **kwargs):
        """Initialize CombinedFeaturewiseDatasetMeasure

        :Parameters:
          analyzers : list or None
            List of analyzers to be used. There is no logic to populate
            such a list in __call__, so it must be either provided to
            the constructor or assigned to .analyzers prior to calling
        """
        if analyzers is None:
            analyzers = []

        FeaturewiseDatasetMeasure.__init__(self, **kwargs)
        self.__analyzers = analyzers
        """List of analyzers to use"""

        self.__combiner = combiner
        """Which functor to use to combine all sensitivities"""

    def _call(self, dataset):
        sensitivities = []
        for ind, analyzer in enumerate(self.__analyzers):
            if __debug__:
                debug("SA", "Computing sensitivity for SA#%d:%s" %
                      (ind, analyzer))
            sensitivity = analyzer(dataset)
            sensitivities.append(sensitivity)

        self.sensitivities = sensitivities
        if __debug__:
            debug("SA",
                  "Returning combined using %s sensitivity across %d items" %
                  (self.__combiner, len(sensitivities)))

        if self.__combiner is not None:
            sensitivities = self.__combiner(sensitivities)
        else:
            # assure that we have an ndarray on output
            sensitivities = N.asarray(sensitivities)
        return sensitivities

    def _setAnalyzers(self, analyzers):
        """Set the analyzers
        """
        self.__analyzers = analyzers
        """Analyzers to use"""

    analyzers = property(fget=lambda x:x.__analyzers,
                         fset=_setAnalyzers,
                         doc="Used analyzers")

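
# --- Illustrative usage sketch (not part of the original module) -----------
# Two featurewise measures merged into one output.  With combiner=FirstAxisMean
# the per-analyzer sensitivities are averaged across analyzers; with
# combiner=None the stacked ndarray is returned as-is.  The function name and
# arguments are hypothetical example code.
def _example_combined_measure(measure1, measure2, dataset):
    combined = CombinedFeaturewiseDatasetMeasure(
        analyzers=[measure1, measure2],
        combiner=FirstAxisMean)
    return combined(dataset)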

# XXX Why did we come to name everything analyzer? Inputs of regular
#     things like CombinedFeaturewiseDatasetMeasure can be simple
#     measures....

class SplitFeaturewiseDatasetMeasure(FeaturewiseDatasetMeasure):
    """Compute measures across splits for a specific analyzer"""

    # XXX This beast is created based on code of
    #     CombinedFeaturewiseDatasetMeasure, thus another reason to refactor

    sensitivities = StateVariable(enabled=False,
        doc="Sensitivities produced for each split")

    splits = StateVariable(enabled=False, doc=
        """Store the actual splits of the data. Can be memory expensive""")

    def __init__(self, splitter, analyzer,
                 insplit_index=0, combiner=None, **kwargs):
        """Initialize SplitFeaturewiseDatasetMeasure

        :Parameters:
          splitter : Splitter
            Splitter to use to split the dataset
          analyzer : DatasetMeasure
            Measure to be used. Could be an analyzer as well (XXX)
          insplit_index : int
            The splitter generates tuples of datasets on each iteration
            (usually 0th for training, 1st for testing); this is the index
            within that tuple of the dataset to operate on.
        """

        # XXX might want to extend insplit_index to handle 'all', so we store
        #     sensitivities for all parts of the splits... not sure if it is needed

        # XXX We really need to think through the whole transformer/combiner
        #     pipelining

        # Here we provide combiner None since, if results need to be combined
        # within each sensitivity, it is better done within the analyzer
        FeaturewiseDatasetMeasure.__init__(self, combiner=None, **kwargs)

        self.__analyzer = analyzer
        """Analyzer to use per split"""

        self.__combiner = combiner
        """Which functor to use to combine all sensitivities"""

        self.__splitter = splitter
        """Splitter to be used on the dataset"""

        self.__insplit_index = insplit_index

    def _call(self, dataset):
        # local bindings
        analyzer = self.__analyzer
        insplit_index = self.__insplit_index

        sensitivities = []
        self.splits = splits = []
        store_splits = self.states.isEnabled("splits")

        for ind, split in enumerate(self.__splitter(dataset)):
            ds = split[insplit_index]
            if __debug__ and "SA" in debug.active:
                debug("SA", "Computing sensitivity for split %d on "
                      "dataset %s using %s" % (ind, ds, analyzer))
            sensitivity = analyzer(ds)
            sensitivities.append(sensitivity)
            if store_splits:
                splits.append(split)

        self.sensitivities = sensitivities
        if __debug__:
            debug("SA",
                  "Returning sensitivities combined using %s across %d items "
                  "generated by splitter %s" %
                  (self.__combiner, len(sensitivities), self.__splitter))

        if self.__combiner is not None:
            sensitivities = self.__combiner(sensitivities)
        else:
            # assure that we have an ndarray on output
            sensitivities = N.asarray(sensitivities)
        return sensitivities

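
# --- Illustrative usage sketch (not part of the original module) -----------
# Run one featurewise measure on the training part of every split produced by
# a splitter.  The NFoldSplitter import reflects where this code base is
# assumed to keep its splitters (mvpa.datasets.splitter); treat it as an
# assumption and adjust for your version.  The function name and arguments
# are example code.
def _example_split_measure(measure, dataset):
    from mvpa.datasets.splitter import NFoldSplitter

    split_measure = SplitFeaturewiseDatasetMeasure(
        splitter=NFoldSplitter(),
        analyzer=measure,
        insplit_index=0,   # operate on the first dataset of each split tuple
        combiner=None)     # keep one sensitivity vector per split
    return split_measure(dataset)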

class BoostedClassifierSensitivityAnalyzer(Sensitivity):
    """Sensitivity analyzer which merges the sensitivities of all classifiers
    in a `BoostedClassifier` into a single output"""


    # XXX we might like to pass parameters also for combined_analyzer
    @group_kwargs(prefixes=['slave_'], assign=True)
    def __init__(self,
                 clf,
                 analyzer=None,
                 combined_analyzer=None,
                 slave_kwargs={},
                 **kwargs):
        """Initialize Sensitivity Analyzer for `BoostedClassifier`

        :Parameters:
          clf : `BoostedClassifier`
            Classifier to be used
          analyzer : analyzer
            Is used to populate combined_analyzer
          slave_*
            Arguments to pass to the created analyzer if analyzer is None
        """
        Sensitivity.__init__(self, clf, **kwargs)
        if combined_analyzer is None:
            # sanitize kwargs
            kwargs.pop('force_training', None)
            combined_analyzer = CombinedFeaturewiseDatasetMeasure(**kwargs)
        self.__combined_analyzer = combined_analyzer
        """Combined analyzer to use"""

        if analyzer is not None and len(self._slave_kwargs):
            raise ValueError, \
                  "Provide either analyzer or slave_* arguments, not both"
        self.__analyzer = analyzer
        """Analyzer to use for basic classifiers within boosted classifier"""

    def _call(self, dataset):
        analyzers = []
        # create analyzers
        for clf in self.clf.clfs:
            if self.__analyzer is None:
                analyzer = clf.getSensitivityAnalyzer(**(self._slave_kwargs))
                if analyzer is None:
                    raise ValueError, \
                          "Wasn't able to figure out a basic analyzer for clf %s" % \
                          `clf`
                if __debug__:
                    debug("SA", "Selected analyzer %s for clf %s" % \
                          (`analyzer`, `clf`))
            else:
                # XXX shallow copy should be enough...
                analyzer = copy.copy(self.__analyzer)

            # assign corresponding classifier
            analyzer.clf = clf
            # if clf was trained already - don't train again
            if clf.trained:
                analyzer._force_training = False
            analyzers.append(analyzer)

        self.__combined_analyzer.analyzers = analyzers

        # XXX not sure if we don't want to call directly ._call(dataset) to avoid
        #     double application of transformers/combiners, after all we are just
        #     'proxying' here to combined_analyzer...
        # YOH: decided -- lets call ._call
        return self.__combined_analyzer._call(dataset)

    combined_analyzer = property(fget=lambda x:x.__combined_analyzer)


class ProxyClassifierSensitivityAnalyzer(Sensitivity):
    """Sensitivity analyzer that just passes through the output of the
    proxied (slave) classifier's analyzer"""

    @group_kwargs(prefixes=['slave_'], assign=True)
    def __init__(self,
                 clf,
                 analyzer=None,
                 **kwargs):
        """Initialize Sensitivity Analyzer for `ProxyClassifier`
        """
        Sensitivity.__init__(self, clf, **kwargs)

        if analyzer is not None and len(self._slave_kwargs):
            raise ValueError, \
                  "Provide either analyzer or slave_* arguments, not both"

        self.__analyzer = analyzer
        """Analyzer to use for the slave classifier within the proxy classifier"""

    def _call(self, dataset):
        # OPT: local bindings
        clfclf = self.clf.clf
        analyzer = self.__analyzer

        if analyzer is None:
            analyzer = clfclf.getSensitivityAnalyzer(
                **(self._slave_kwargs))
            if analyzer is None:
                raise ValueError, \
                      "Wasn't able to figure out a basic analyzer for clf %s" % \
                      `clfclf`
            if __debug__:
                debug("SA", "Selected analyzer %s for clf %s" % \
                      (analyzer, clfclf))
            # bind to the instance finally
            self.__analyzer = analyzer

        # TODO "remove" unnecessary things below on each call...
        # assign corresponding classifier
        analyzer.clf = clfclf

        # if clf was trained already - don't train again
        if clfclf.trained:
            analyzer._force_training = False

        return analyzer._call(dataset)

    analyzer = property(fget=lambda x:x.__analyzer)


class MappedClassifierSensitivityAnalyzer(ProxyClassifierSensitivityAnalyzer):
    """Set sensitivity analyzer output to be reverse-mapped using the mapper
    of the slave classifier"""

    def _call(self, dataset):
        sens = super(MappedClassifierSensitivityAnalyzer, self)._call(dataset)
        # So we have here the case that some sensitivities are given
        # as nfeatures x nclasses, thus we need to take .T for the
        # mapper and revert back afterwards
        # devguide's TODO lists this point to 'discuss'
        sens_mapped = self.clf.maskclf.mapper.reverse(sens.T)
        return sens_mapped.T