Changeset e12c6c in git


Timestamp:
Feb 2, 2018, 8:18:02 AM (6 years ago)
Author:
Andreas Steenpass <steenpass@…>
Branches:
spielwiese (fe61d9c35bf7c61f2b6cbf1b56e25e2f08d536cc)
Children:
9b67d65cca6ba85b6bbd058c2032a7592d15660f
Parents:
e7be125e3c4930d177dccfb8d52e2f0553e56f1f
git-author:
Andreas Steenpass <steenpass@mathematik.uni-kl.de> 2018-02-02 08:18:02+01:00
git-committer:
Andreas Steenpass <steenpass@mathematik.uni-kl.de> 2018-02-02 11:17:17+01:00
Message:
chg: introduce option use_cache for syFrank()
Files:
3 edited

  • Singular/iparith.cc

--- Singular/iparith.cc (re7be12)
+++ Singular/iparith.cc (re12c6c)
@@ -2226,5 +2226,5 @@
         WerrorS("wrong optional argument for fres");
     }
-    syStrategy r = syFrank(id, max_length, method, false);
+    syStrategy r = syFrank(id, max_length, method);
     assume(r->fullres != NULL);
     res->data = (void *)r;
  • kernel/GBEngine/syz.h

--- kernel/GBEngine/syz.h (re7be12)
+++ kernel/GBEngine/syz.h (re12c6c)
@@ -99,6 +99,7 @@
 syStrategy syKosz(ideal arg,int * length);
 
+// use_cache and use_tensor_trick are needed in PrymGreen.jl; do not delete!
 syStrategy syFrank(const ideal arg, const int length, const char *method,
-        const bool use_tensor_trick);
+        const bool use_cache = true, const bool use_tensor_trick = false);
 
 void syKillComputation(syStrategy syzstr, ring r=currRing);
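
Both new parameters are declared with default values, so existing call sites (such as the fres handler in Singular/iparith.cc above, which now simply drops its explicit argument) keep compiling unchanged; only callers that want to disable the cache or enable the tensor trick pass the flags explicitly. Below is a minimal sketch of a call site under the new signature; the wrapper name resolve(), the include path and the surrounding setup are illustrative assumptions, not part of this changeset:

    #include "kernel/GBEngine/syz.h"

    // Hypothetical wrapper: resolve an ideal with or without the per-term cache.
    // 'arg', 'len' and 'method' are assumed to be a valid ideal, length bound
    // and method string accepted by syFrank().
    static syStrategy resolve(ideal arg, int len, const char *method,
            bool with_cache)
    {
        if (with_cache)
            return syFrank(arg, len, method);       // use_cache defaults to true
        return syFrank(arg, len, method, false);    // disable the per-term cache
    }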
  • kernel/GBEngine/syz4.cc

--- kernel/GBEngine/syz4.cc (re7be12)
+++ kernel/GBEngine/syz4.cc (re12c6c)
@@ -15,11 +15,4 @@
 #include <vector>
 #include <map>
-
-/*
- * If set to true, the result of compute_image() is cached for _every_ term in
- * the current step of the resolution. This corresponds to the subtree attached
- * to the node which represents this term, see reference.
- */
-#define CACHE 1
 
 /*
     
@@ -144,14 +137,12 @@
 }
 
-#if CACHE
 static poly traverse_tail(const poly multiplier, const int comp,
         const ideal previous_module, const std::vector<bool> &variables,
         const lt_struct *const *const hash_previous_module);
-#else
+
 static poly compute_image(const poly multiplier, const int comp,
         const ideal previous_module, const std::vector<bool> &variables,
-        const lt_struct *const *const hash_previous_module);
-#define traverse_tail compute_image
-#endif   // CACHE
+        const lt_struct *const *const hash_previous_module,
+        const bool use_cache);
 
 /*
     
@@ -160,5 +151,6 @@
 static poly reduce_term(const poly multiplier, const poly term,
         const ideal previous_module, const std::vector<bool> &variables,
-        const lt_struct *const *const hash_previous_module)
+        const lt_struct *const *const hash_previous_module,
+        const bool use_cache)
 {
     poly s = find_reducer(multiplier, term, hash_previous_module);
     
@@ -168,6 +160,12 @@
     const ring r = currRing;
     const int c = __p_GetComp(s, r) - 1;
-    const poly t = traverse_tail(s, c, previous_module, variables,
-            hash_previous_module);
+    poly t;
+    if (use_cache) {
+        t = traverse_tail(s, c, previous_module, variables,
+                hash_previous_module);
+    } else {
+        t = compute_image(s, c, previous_module, variables,
+                hash_previous_module, false);
+    }
     return p_Add_q(s, t, r);
 }
     
@@ -179,5 +177,6 @@
 static poly compute_image(const poly multiplier, const int comp,
         const ideal previous_module, const std::vector<bool> &variables,
-        const lt_struct *const *const hash_previous_module)
+        const lt_struct *const *const hash_previous_module,
+        const bool use_cache)
 {
     const poly tail = previous_module->m[comp]->next;
     
@@ -188,5 +187,5 @@
     for (poly p = tail; p != NULL; p = pNext(p)) {
         const poly rt = reduce_term(multiplier, p, previous_module, variables,
-                hash_previous_module);
+                hash_previous_module, use_cache);
         sBucket_Add_p(sum, rt, pLength(rt));
     }
     
@@ -198,5 +197,4 @@
 }
 
-#if CACHE
 struct cache_compare
 {
     
@@ -267,9 +265,8 @@
     }
     poly p = compute_image(multiplier, comp, previous_module, variables,
-            hash_previous_module);
+            hash_previous_module, true);
     insert_into_cache_term(T, multiplier, p);
     return p;
 }
-#endif   // CACHE
 
 /*
     
@@ -278,12 +275,19 @@
 static poly lift_ext_LT(const poly a, const ideal previous_module,
         const std::vector<bool> &variables,
-        const lt_struct *const *const hash_previous_module)
+        const lt_struct *const *const hash_previous_module,
+        const bool use_cache)
 {
     const ring R = currRing;
     // the leading term does not need to be cached
     poly t1 = compute_image(a, __p_GetComp(a, R)-1, previous_module, variables,
-            hash_previous_module);
-    poly t2 = traverse_tail(a->next, __p_GetComp(a->next, R)-1,
-            previous_module, variables, hash_previous_module);
+            hash_previous_module, use_cache);
+    poly t2;
+    if (use_cache) {
+        t2 = traverse_tail(a->next, __p_GetComp(a->next, R)-1,
+                previous_module, variables, hash_previous_module);
+    } else {
+        t2 = compute_image(a->next, __p_GetComp(a->next, R)-1,
+                previous_module, variables, hash_previous_module, false);
+    }
     t1 = p_Add_q(t1, t2, R);
     return t1;
     
@@ -527,9 +531,9 @@
  */
 static void computeLiftings(const resolvente res, const int index,
-        const std::vector<bool> &variables)
-{
-#if CACHE
-    initialize_cache(res[index-1]->ncols);
-#endif   // CACHE
+        const std::vector<bool> &variables, const bool use_cache)
+{
+    if (use_cache) {
+        initialize_cache(res[index-1]->ncols);
+    }
     lt_struct **hash_previous_module
         = (lt_struct **)omAlloc((res[index-1]->rank+1)*sizeof(lt_struct *));
     
@@ -537,5 +541,5 @@
     for (int j = res[index]->ncols-1; j >= 0; j--) {
         res[index]->m[j]->next->next = lift_ext_LT(res[index]->m[j],
-                res[index-1], variables, hash_previous_module);
+                res[index-1], variables, hash_previous_module, use_cache);
     }
     for (int i = 0; i <= res[index-1]->rank; i++) {
     
@@ -543,7 +547,7 @@
     }
     omFree(hash_previous_module);
-#if CACHE
-    delete_cache(res[index-1]->ncols);
-#endif   // CACHE
+    if (use_cache) {
+        delete_cache(res[index-1]->ncols);
+    }
 }
 
     
@@ -600,11 +604,11 @@
 static int computeResolution_iteration(resolvente res, const int max_index,
         syzHeadFunction *syzHead, const bool do_lifting,
-        const bool single_module, const bool use_tensor_trick,
-        std::vector<bool> &variables)
+        const bool single_module, const bool use_cache,
+        const bool use_tensor_trick, std::vector<bool> &variables)
 {
     int index = 1;
     while (!idIs0(res[index])) {
         if (do_lifting) {
-            computeLiftings(res, index, variables);
+            computeLiftings(res, index, variables, use_cache);
             if (single_module) {
                 delete_tails(res, index-1);
     
@@ -632,5 +636,6 @@
 static int computeResolution(resolvente res, const int max_index,
         syzHeadFunction *syzHead, const bool do_lifting,
-        const bool single_module, const bool use_tensor_trick)
+        const bool single_module, const bool use_cache,
+        const bool use_tensor_trick)
 {
     if (idIs0(res[0])) {
     
@@ -649,5 +654,6 @@
         res[1] = computeFrame(res[0], syzM_i_unsorted, syzHead);
         index = computeResolution_iteration(res, max_index, syzHead,
-                do_lifting, single_module, use_tensor_trick, variables);
+                do_lifting, single_module, use_cache, use_tensor_trick,
+                variables);
     }
     variables.clear();
     
@@ -726,4 +732,9 @@
  * Compute the Schreyer resolution of arg, see reference at the beginning of
  * this file.
+ *
+ * If use_cache == true (default), the result of compute_image() is cached for
+ * _every_ term in the current step of the resolution. This corresponds to the
+ * subtree attached to the node which represents this term, see reference.
+ *
  * If use_tensor_trick == true, the current module is modfied after each
  * lifting step in the resolution: any term which contains a variable which
     
@@ -731,9 +742,10 @@
  * resulting object is not necessarily a complex anymore. However, constant
  * entries remain exactly the same. This option does not apply for
- * method == "frame" and method "extended frame". It is used in PrymGreen.jl;
- * do not delete!
+ * method == "frame" and method "extended frame".
+ *
+ * These two options are used in PrymGreen.jl; do not delete!
  */
 syStrategy syFrank(const ideal arg, const int length, const char *method,
-        const bool use_tensor_trick)
+        const bool use_cache, const bool use_tensor_trick)
 {
     syStrategy result = (syStrategy)omAlloc0(sizeof(ssyStrategy));
     
@@ -749,5 +761,5 @@
     set_options(&syzHead, &do_lifting, &single_module, method);
     int new_length = computeResolution(res, length-1, syzHead, do_lifting,
-            single_module, use_tensor_trick);
+            single_module, use_cache, use_tensor_trick);
     if (new_length < length) {
         res = (resolvente)omReallocSize(res, (length+1)*sizeof(ideal),
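
The comment block added to the syFrank() documentation above describes use_cache == true as caching the result of compute_image() for every term of the current step, so the subtree attached to that term is computed once and then reused. The real cache in syz4.cc is built from cache_compare, initialize_cache, insert_into_cache_term and delete_cache; the stand-alone sketch below only illustrates the general memoize-or-recompute pattern behind the flag, with a placeholder key type and workload (none of it is Singular API):

    #include <map>

    // Illustrative only: switchable memoization in the spirit of the use_cache
    // flag.  'Term' and 'expensive_image' stand in for poly terms and
    // compute_image(); they are not part of this changeset.
    using Term = long;

    static Term expensive_image(Term t) { return t * t; }    // placeholder work

    static Term image_of(Term t, bool use_cache)
    {
        static std::map<Term, Term> cache;     // one stored result per term
        if (!use_cache)
            return expensive_image(t);         // recompute each time, save memory
        auto it = cache.find(t);
        if (it != cache.end())
            return it->second;                 // reuse the cached subtree result
        Term r = expensive_image(t);
        cache.emplace(t, r);                   // cache miss: store the result
        return r;
    }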