Changeset 1102dd in git


Ignore:
Timestamp:
Feb 2, 2018, 1:50:34 PM (6 years ago)
Author:
Hans Schoenemann <hannes@…>
Branches:
spielwiese (at commit 4a9821a93ffdc22a6696668bd4f6b8c9de3e6c5f)
Children:
7f0617292f0c36c3841f1264376b57d9b114c5f2
Parents:
eb4ae817a42927ba2ac7a0273e69d1d14a295809 (first parent)
9b67d65cca6ba85b6bbd058c2032a7592d15660f (second parent, merged branch)
git-author:
Hans Schoenemann <hannes@mathematik.uni-kl.de>2018-02-02 13:50:34+01:00
git-committer:
GitHub <noreply@github.com>2018-02-02 13:50:34+01:00
Message:
Merge pull request #849 from steenpass/fres

update fres()
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • Singular/iparith.cc

    reb4ae8 r1102dd  
    22222222    if (strcmp(method, "complete") != 0
    22232223            && strcmp(method, "frame") != 0
    2224             && strcmp(method, "extended frame") != 0) {
     2224            && strcmp(method, "extended frame") != 0
     2225            && strcmp(method, "single module") != 0) {
    22252226        WerrorS("wrong optional argument for fres");
    22262227    }
  • kernel/GBEngine/syz.h

    reb4ae8 r1102dd  
    9999syStrategy syKosz(ideal arg,int * length);
    100100
    101 syStrategy syFrank(const ideal arg, const int length, const char *method);
     101// use_cache and use_tensor_trick are needed in PrymGreen.jl; do not delete!
     102syStrategy syFrank(const ideal arg, const int length, const char *method,
     103        const bool use_cache = true, const bool use_tensor_trick = false);
    102104
    103105void syKillComputation(syStrategy syzstr, ring r=currRing);
  • kernel/GBEngine/syz4.cc

    reb4ae8 r1102dd  
    1515#include <vector>
    1616#include <map>
    17 
    18 /*
    19  * If set to true, the result of compute_image() is cached for _every_ term in
    20  * the current step of the resolution. This corresponds to the subtree attached
    21  * to the node which represents this term, see reference.
    22  */
    23 #define CACHE 1
    2417
    2518/*
     
    5043 * used to determine lower order terms.
    5144 */
    52 static inline bool check_variables(const std::vector<bool> variables,
     45static inline bool check_variables(const std::vector<bool> &variables,
    5346        const poly m)
    5447{
     
    144137}
    145138
    146 #if CACHE
    147139static poly traverse_tail(const poly multiplier, const int comp,
    148140        const ideal previous_module, const std::vector<bool> &variables,
    149141        const lt_struct *const *const hash_previous_module);
    150 #else
     142
    151143static poly compute_image(const poly multiplier, const int comp,
    152144        const ideal previous_module, const std::vector<bool> &variables,
    153         const lt_struct *const *const hash_previous_module);
    154 #define traverse_tail compute_image
    155 #endif   // CACHE
     145        const lt_struct *const *const hash_previous_module,
     146        const bool use_cache);
    156147
    157148/*
     
    160151static poly reduce_term(const poly multiplier, const poly term,
    161152        const ideal previous_module, const std::vector<bool> &variables,
    162         const lt_struct *const *const hash_previous_module)
     153        const lt_struct *const *const hash_previous_module,
     154        const bool use_cache)
    163155{
    164156    poly s = find_reducer(multiplier, term, hash_previous_module);
     
    168160    const ring r = currRing;
    169161    const int c = __p_GetComp(s, r) - 1;
    170     const poly t = traverse_tail(s, c, previous_module, variables,
    171             hash_previous_module);
     162    poly t;
     163    if (use_cache) {
     164        t = traverse_tail(s, c, previous_module, variables,
     165                hash_previous_module);
     166    } else {
     167        t = compute_image(s, c, previous_module, variables,
     168                hash_previous_module, false);
     169    }
    172170    return p_Add_q(s, t, r);
    173171}
     
    179177static poly compute_image(const poly multiplier, const int comp,
    180178        const ideal previous_module, const std::vector<bool> &variables,
    181         const lt_struct *const *const hash_previous_module)
     179        const lt_struct *const *const hash_previous_module,
     180        const bool use_cache)
    182181{
    183182    const poly tail = previous_module->m[comp]->next;
     
    188187    for (poly p = tail; p != NULL; p = pNext(p)) {
    189188        const poly rt = reduce_term(multiplier, p, previous_module, variables,
    190                 hash_previous_module);
     189                hash_previous_module, use_cache);
    191190        sBucket_Add_p(sum, rt, pLength(rt));
    192191    }
     
    198197}
    199198
    200 #if CACHE
    201199struct cache_compare
    202200{
     
    267265    }
    268266    poly p = compute_image(multiplier, comp, previous_module, variables,
    269             hash_previous_module);
     267            hash_previous_module, true);
    270268    insert_into_cache_term(T, multiplier, p);
    271269    return p;
    272270}
    273 #endif   // CACHE
    274271
    275272/*
     
    278275static poly lift_ext_LT(const poly a, const ideal previous_module,
    279276        const std::vector<bool> &variables,
    280         const lt_struct *const *const hash_previous_module)
     277        const lt_struct *const *const hash_previous_module,
     278        const bool use_cache)
    281279{
    282280    const ring R = currRing;
    283281    // the leading term does not need to be cached
    284282    poly t1 = compute_image(a, __p_GetComp(a, R)-1, previous_module, variables,
    285             hash_previous_module);
    286     poly t2 = traverse_tail(a->next, __p_GetComp(a->next, R)-1,
    287             previous_module, variables, hash_previous_module);
     283            hash_previous_module, use_cache);
     284    poly t2;
     285    if (use_cache) {
     286        t2 = traverse_tail(a->next, __p_GetComp(a->next, R)-1,
     287                previous_module, variables, hash_previous_module);
     288    } else {
     289        t2 = compute_image(a->next, __p_GetComp(a->next, R)-1,
     290                previous_module, variables, hash_previous_module, false);
     291    }
    288292    t1 = p_Add_q(t1, t2, R);
    289293    return t1;
     
    527531 */
    528532static void computeLiftings(const resolvente res, const int index,
    529         std::vector<bool> &variables)
    530 {
    531     update_variables(variables, res[index-1]);
    532     if (index == 2) {   // we don't know if the input is a reduced SB
    533         variables[currRing->N] = false;
    534     }
    535 #if CACHE
    536     initialize_cache(res[index-1]->ncols);
    537 #endif   // CACHE
     533        const std::vector<bool> &variables, const bool use_cache)
     534{
     535    if (use_cache) {
     536        initialize_cache(res[index-1]->ncols);
     537    }
    538538    lt_struct **hash_previous_module
    539539        = (lt_struct **)omAlloc((res[index-1]->rank+1)*sizeof(lt_struct *));
     
    541541    for (int j = res[index]->ncols-1; j >= 0; j--) {
    542542        res[index]->m[j]->next->next = lift_ext_LT(res[index]->m[j],
    543                 res[index-1], variables, hash_previous_module);
     543                res[index-1], variables, hash_previous_module, use_cache);
    544544    }
    545545    for (int i = 0; i <= res[index-1]->rank; i++) {
     
    547547    }
    548548    omFree(hash_previous_module);
    549 #if CACHE
    550     delete_cache(res[index-1]->ncols);
    551 #endif   // CACHE
     549    if (use_cache) {
     550        delete_cache(res[index-1]->ncols);
     551    }
     552}
     553
     554/*
     555 * check if the monomial m contains any of the variables set to false
     556 */
     557static inline bool contains_unused_variable(const poly m,
     558    const std::vector<bool> &variables)
     559{
     560    const ring R = currRing;
     561    for (int j = R->N; j > 0; j--) {
     562        if (!variables[j-1] && p_GetExp(m, j, R) > 0) {
     563            return true;
     564        }
     565    }
     566    return false;
     567}
     568
     569/*
     570 * delete any term in res[index] which contains any of the variables set to
     571 * false
     572 */
     573static void delete_variables(resolvente res, const int index,
     574    const std::vector<bool> &variables)
     575{
     576    for (int i = 0; i < res[index]->ncols; i++) {
     577        poly p_iter = res[index]->m[i]->next;
     578        if (p_iter != NULL) {
     579            while (p_iter->next != NULL) {
     580                if (contains_unused_variable(p_iter->next, variables)) {
     581                    pLmDelete(&p_iter->next);
     582                } else {
     583                    pIter(p_iter);
     584                }
     585            }
     586        }
     587    }
     588}
     589
     590static void delete_tails(resolvente res, const int index)
     591{
     592    const ring r = currRing;
     593    for (int i = 0; i < res[index]->ncols; i++) {
     594        if (res[index]->m[i] != NULL) {
     595            p_Delete(&(res[index]->m[i]->next), r);
     596        }
     597    }
    552598}
    553599
     
    556602 * either index == max_index is reached or res[index] is the zero module
    557603 */
     604static int computeResolution_iteration(resolvente res, const int max_index,
     605        syzHeadFunction *syzHead, const bool do_lifting,
     606        const bool single_module, const bool use_cache,
     607        const bool use_tensor_trick, std::vector<bool> &variables)
     608{
     609    int index = 1;
     610    while (!idIs0(res[index])) {
     611        if (do_lifting) {
     612            computeLiftings(res, index, variables, use_cache);
     613            if (single_module) {
     614                delete_tails(res, index-1);
     615            }
     616            // we don't know if the input is a reduced SB:
     617            if (index == 1) {
     618                variables[currRing->N] = false;
     619            }
     620            update_variables(variables, res[index]);
     621            if (use_tensor_trick) {
     622                delete_variables(res, index, variables);
     623            }
     624        }
     625        if (index >= max_index) { break; }
     626        index++;
     627        res[index] = computeFrame(res[index-1], syzM_i_sorted, syzHead);
     628    }
     629    return index;
     630}
     631
     632/*
     633 * compute the frame of the first syzygy module and set variables, then call
     634 * computeResolution_iteration() for the remaining steps
     635 */
    558636static int computeResolution(resolvente res, const int max_index,
    559         syzHeadFunction *syzHead, const bool do_lifting)
    560 {
     637        syzHeadFunction *syzHead, const bool do_lifting,
     638        const bool single_module, const bool use_cache,
     639        const bool use_tensor_trick)
     640{
     641    if (idIs0(res[0])) {
     642        return 1;
     643    }
     644    std::vector<bool> variables;
     645    variables.resize(currRing->N+1, true);
     646    if (do_lifting) {
     647        update_variables(variables, res[0]);
     648        if (use_tensor_trick) {
     649            delete_variables(res, 0, variables);
     650        }
     651    }
    561652    int index = 0;
    562     if (!idIs0(res[0]) && 0 < max_index) {
    563         index++;
     653    if (max_index > 0) {
    564654        res[1] = computeFrame(res[0], syzM_i_unsorted, syzHead);
    565         std::vector<bool> variables;
    566         variables.resize(currRing->N+1, true);
    567         while (!idIs0(res[index])) {
    568             if (do_lifting) {
    569                 computeLiftings(res, index, variables);
    570             }
    571             if (index >= max_index) { break; }
    572             index++;
    573             res[index] = computeFrame(res[index-1], syzM_i_sorted, syzHead);
    574         }
    575         variables.clear();
    576     }
     655        index = computeResolution_iteration(res, max_index, syzHead,
     656                do_lifting, single_module, use_cache, use_tensor_trick,
     657                variables);
     658    }
     659    variables.clear();
    577660    return index+1;
    578661}
    579662
    580663static void set_options(syzHeadFunction **syzHead_ptr, bool *do_lifting_ptr,
    581         const char *method)
     664        bool *single_module_ptr, const char *method)
    582665{
    583666    if (strcmp(method, "complete") == 0) {   // default
    584667        *syzHead_ptr = syzHeadExtFrame;
    585668        *do_lifting_ptr = true;
     669        *single_module_ptr = false;
    586670    }
    587671    else if (strcmp(method, "frame") == 0) {
    588672        *syzHead_ptr = syzHeadFrame;
    589673        *do_lifting_ptr = false;
     674        *single_module_ptr = false;
    590675    }
    591676    else if (strcmp(method, "extended frame") == 0) {
    592677        *syzHead_ptr = syzHeadExtFrame;
    593678        *do_lifting_ptr = false;
     679        *single_module_ptr = false;
     680    }
     681    else if (strcmp(method, "single module") == 0) {
     682        *syzHead_ptr = syzHeadExtFrame;
     683        *do_lifting_ptr = true;
     684        *single_module_ptr = true;
    594685    }
    595686    else {   // "linear strand" (not yet implemented)
    596687        *syzHead_ptr = syzHeadExtFrame;
    597688        *do_lifting_ptr = true;
     689        *single_module_ptr = false;
    598690    }
    599691}
     
    619711
    620712/*
    621  * for each poly in the resolution, insert the first two terms at their right
    622  * places
    623  */
    624 static void insert_ext_induced_LTs(const resolvente res, const int length)
     713 * For each poly in the resolution, insert the first two terms at their right
     714 * places. If single_module is true, then only consider the last module.
     715 */
     716static void insert_ext_induced_LTs(const resolvente res, const int length,
     717        const bool single_module)
    625718{
    626719    const ring R = currRing;
    627720    poly p, q;
    628     for (int i = length-2; i > 0; i--) {
    629         for (int j = res[i]->ncols-1; j >= 0; j--) {
    630             insert_first_term(res[i]->m[j]->next, p, q, R);
    631             insert_first_term(res[i]->m[j], p, q, R);
    632         }
    633     }
    634 }
    635 
    636 /*
    637  * compute the Schreyer resolution of arg, see reference at the beginning of
    638  * this file
    639  */
    640 syStrategy syFrank(const ideal arg, const int length, const char *method)
     721    int index = (single_module ? length-1 : 1);
     722    while (index < length && !idIs0(res[index])) {
     723        for (int j = res[index]->ncols-1; j >= 0; j--) {
     724            insert_first_term(res[index]->m[j]->next, p, q, R);
     725            insert_first_term(res[index]->m[j], p, q, R);
     726        }
     727        index++;
     728    }
     729}
     730
     731/*
     732 * Compute the Schreyer resolution of arg, see reference at the beginning of
     733 * this file.
     734 *
     735 * If use_cache == true (default), the result of compute_image() is cached for
     736 * _every_ term in the current step of the resolution. This corresponds to the
     737 * subtree attached to the node which represents this term, see reference.
     738 *
     739 * If use_tensor_trick == true, the current module is modfied after each
     740 * lifting step in the resolution: any term which contains a variable which
     741 * does not appear among the (induced) leading terms is deleted. Note that the
     742 * resulting object is not necessarily a complex anymore. However, constant
     743 * entries remain exactly the same. This option does not apply for
     744 * method == "frame" and method "extended frame".
     745 *
     746 * These two options are used in PrymGreen.jl; do not delete!
     747 */
     748syStrategy syFrank(const ideal arg, const int length, const char *method,
     749        const bool use_cache, const bool use_tensor_trick)
    641750{
    642751    syStrategy result = (syStrategy)omAlloc0(sizeof(ssyStrategy));
     
    649758    syzHeadFunction *syzHead;
    650759    bool do_lifting;
    651     set_options(&syzHead, &do_lifting, method);
    652     int new_length = computeResolution(res, length-1, syzHead, do_lifting);
     760    bool single_module;
     761    set_options(&syzHead, &do_lifting, &single_module, method);
     762    int new_length = computeResolution(res, length-1, syzHead, do_lifting,
     763            single_module, use_cache, use_tensor_trick);
    653764    if (new_length < length) {
    654765        res = (resolvente)omReallocSize(res, (length+1)*sizeof(ideal),
     
    656767    }
    657768    if (strcmp(method, "frame") != 0) {
    658         insert_ext_induced_LTs(res, new_length);
     769        insert_ext_induced_LTs(res, new_length, single_module);
    659770    }
    660771    result->fullres = res;
  • libpolys/coeffs/ffields.cc

    r9b67d6 r1102dd  
    807807  if (n==n_GF) {
    808808    GFInfo* p = (GFInfo *)(parameter);
    809     int c = pow (p->GFChar, p->GFDegree);
     809    int c = (int)pow ((double)p->GFChar, (double)p->GFDegree);
    810810    if ((c == r->m_nfCharQ) && (strcmp(n_ParameterNames(r)[0], p->GFPar_name) == 0))
    811811      return TRUE;
     
    928928  }
    929929
    930   int c = pow (p->GFChar, p->GFDegree);
     930  int c = (int)pow ((double)p->GFChar, (double)p->GFDegree);
    931931
    932932  nfReadTable(c, r);
Note: See TracChangeset for help on using the changeset viewer.