Rename mpiProgram flag to sharedMemoryParallelization

2024-06-19 18:08:27 +03:00
parent 305d43c591
commit 1be169e7ff
25 changed files with 84 additions and 84 deletions
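In effect, the renamed global selects between the tool's two regimes: 0 keeps the full DVMH pipeline with data distribution, while 1 (set by the -mpi switch, by SPF_SharedMemoryParallelization(), or when MPI calls are detected) restricts SAPFOR to shared-memory loop parallelization and skips the distribution passes, as the hunks below show. A minimal, illustrative sketch of the pattern — not code from this repository; the two pass functions are hypothetical stand-ins for the real pipeline in runPass():

#include <cstdio>
#include <cstring>

// 0 = full DVMH regime with data distribution,
// 1 = shared-memory-only parallelization (no distribution directives)
int sharedMemoryParallelization = 0;

static void runDistributionPasses() { std::puts("distribute arrays, create remotes/shadows"); } // hypothetical
static void runSharedMemoryPasses() { std::puts("insert parallel dirs only"); }                 // hypothetical

int main(int argc, char** argv)
{
    for (int i = 1; i < argc; ++i)
        if (std::strcmp(argv[i], "-mpi") == 0) // mirrors the -mpi handling in main() below
            sharedMemoryParallelization = 1;

    if (sharedMemoryParallelization == 0)
        runDistributionPasses();
    else
        runSharedMemoryPasses();
    return 0;
}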

View File

@@ -45,7 +45,7 @@ using std::get;
using std::string;
using std::wstring;
-extern int mpiProgram;
+extern int sharedMemoryParallelization;
static vector<pair<string, vector<Expression*>>>
groupRealignsDirs(const vector<pair<string, vector<Expression*>>>& toRealign)
@@ -453,7 +453,7 @@ void createParallelDirs(File *file,
auto& tmp = dataDirectives.distrRules;
vector<pair<DIST::Array*, const DistrVariant*>> currentVar;
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{
for (int z1 = 0; z1 < currentVariant.size(); ++z1)
currentVar.push_back(make_pair(tmp[z1].first, &tmp[z1].second[currentVariant[z1]]));

View File

@@ -525,7 +525,7 @@ void createParallelDirectives(const map<LoopGraph*, map<DIST::Array*, ArrayInfo*
const int itersCount = currLoop->calculatedCountOfIters;
uint64_t regId = currReg->GetId();
-if (mpiProgram)
+if (sharedMemoryParallelization)
regId = (uint64_t)currLoop;
const DIST::Arrays<int> &allArrays = currReg->GetAllArrays();
@@ -749,7 +749,7 @@ void createParallelDirectives(const map<LoopGraph*, map<DIST::Array*, ArrayInfo*
const int dimPos = mainArray.dimentionPos;
//change array to template if ACROSS was not found or not loop_array
-if (mainArray.underAcross == false && !(mpiProgram == 1 && mainArray.arrayRef->IsLoopArray()))
+if (mainArray.underAcross == false && !(sharedMemoryParallelization == 1 && mainArray.arrayRef->IsLoopArray()))
{
set<DIST::Array*> realArrayRef;
getRealArrayRefs(mainArray.arrayRef, mainArray.arrayRef, realArrayRef, arrayLinksByFuncCalls);
@@ -1762,7 +1762,7 @@ static bool addRedistributionDirs(File* file, const vector<pair<DIST::Array*, co
}
needToSkip = false;
-if (mpiProgram)
+if (sharedMemoryParallelization)
return false;
// Realign with global template clones

View File

@@ -39,7 +39,7 @@ using std::make_tuple;
static const string dvmhModuleName = "dvmh_template_mod";
-extern int mpiProgram;
+extern int sharedMemoryParallelization;
// the size of the vector indicates the type of DVM_DIR
SgStatement* createStatFromExprs(const vector<Expression*> &exprs)
@@ -2384,7 +2384,7 @@ void insertParallelDirs(SgFile *file, bool extract,
const char* file_name = file->filename();
insertDirectiveToFile(file, file_name, createdDirectives, extract, messages);
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{
map<string, FuncInfo*> mapFuncInfo;
createMapOfFunc(callGraph, mapFuncInfo);
@@ -2420,7 +2420,7 @@ void insertParallelDirs(SgFile *file, bool extract,
for (auto& array : declaredArrays)
array.second.first->ClearShadowSpecs();
}
-else if (mpiProgram == 0)
+else if (sharedMemoryParallelization == 0)
{
set<uint64_t> regNum;
for (int z = 0; z < parallelRegions.size(); ++z)

View File

@@ -22,7 +22,7 @@ struct FuncInfo;
#define TO_STR std::to_string
#if __SPF
-extern int mpiProgram;
+extern int sharedMemoryParallelization;
#endif
namespace Distribution
@@ -117,7 +117,7 @@ namespace Distribution
if (it == templateInfo.end())
{
#if __SPF
-if (withCheck && mpiProgram != 0)
+if (withCheck && sharedMemoryParallelization != 0)
printInternalError(convertFileName(__FILE__).c_str(), __LINE__);
#endif
currLink = new TemplateLink(dimSize);

View File

@@ -530,7 +530,7 @@ int createAlignDirs(DIST::GraphCSR<int, double, attrType> &reducedG, const DIST:
{
DIST::Array* array = arrayPair.second;
-if (mpiProgram != 0)
+if (sharedMemoryParallelization != 0)
if (onlyThese.find(array) == onlyThese.end())
continue;
@@ -604,7 +604,7 @@ int createAlignDirs(DIST::GraphCSR<int, double, attrType> &reducedG, const DIST:
printInternalError(convertFileName(__FILE__).c_str(), __LINE__);
}
-if (isAllRulesEqualWithoutArray(rules) || mpiProgram != 0)
+if (isAllRulesEqualWithoutArray(rules) || sharedMemoryParallelization != 0)
{
bool hasError = createNewAlignRule(array, allArrays, rules[0], dataDirectives, SPF_messages, canNotAlign == NULL);
if (hasError)

View File

@@ -281,10 +281,10 @@ static vector<SgExpression*>
for (int z = 0; z < loops.size(); ++z)
{
currLoop = loops[z];
-const uint64_t regId = mpiProgram ? (uint64_t)currLoop : currLoop->region->GetId();
+const uint64_t regId = sharedMemoryParallelization ? (uint64_t)currLoop : currLoop->region->GetId();
auto dirForLoop = currLoop->directiveForLoop;
-auto tmplP = pairs.first->GetTemplateArray(regId, mpiProgram != 0);
+auto tmplP = pairs.first->GetTemplateArray(regId, sharedMemoryParallelization != 0);
auto links = pairs.first->GetLinksWithTemplate(regId);
// no mapping for this loop, skip this
@@ -324,10 +324,10 @@ static vector<SgExpression*>
if (realRefsLocal.size() == 0)
printInternalError(convertFileName(__FILE__).c_str(), __LINE__);
-auto tmplP = (*realRefsLocal.begin())->GetTemplateArray(regId, mpiProgram != 0);
+auto tmplP = (*realRefsLocal.begin())->GetTemplateArray(regId, sharedMemoryParallelization != 0);
auto links = (*realRefsLocal.begin())->GetLinksWithTemplate(regId);
-auto tmplP_et = pairs.first->GetTemplateArray(regId, mpiProgram != 0);
+auto tmplP_et = pairs.first->GetTemplateArray(regId, sharedMemoryParallelization != 0);
auto links_et = pairs.first->GetLinksWithTemplate(regId);
if (tmplP == tmplP_et)
@@ -557,7 +557,7 @@ ParallelDirective::genDirective(File* file, const vector<pair<DIST::Array*, cons
dirStatement[2] = new Expression(expr);
-if (mpiProgram)
+if (sharedMemoryParallelization)
directive += ")";
else
directive += ") ON " + mapTo->GetShortName() + "(";
@@ -565,7 +565,7 @@ ParallelDirective::genDirective(File* file, const vector<pair<DIST::Array*, cons
SgArrayRefExp* arrayExpr = NULL;
string arrayExprS = "";
-if (!mpiProgram)
+if (!sharedMemoryParallelization)
{
SgSymbol* symbForPar = NULL;
if (arrayRef->IsTemplate())
@@ -652,7 +652,7 @@ ParallelDirective::genDirective(File* file, const vector<pair<DIST::Array*, cons
p->setLhs(makeExprList(list));
}
-if (mpiProgram || across.size() != 0)
+if (sharedMemoryParallelization || across.size() != 0)
{
if (!arrayRef2->IsLoopArray())
{
@@ -662,7 +662,7 @@ ParallelDirective::genDirective(File* file, const vector<pair<DIST::Array*, cons
loopsTie.push_back(loops[i]);
set<DIST::Array*> onlyFor;
-if (mpiProgram == 0 && across.size())
+if (sharedMemoryParallelization == 0 && across.size())
{
for (int k = 0; k < (int)across.size(); ++k)
{
@@ -672,7 +672,7 @@ ParallelDirective::genDirective(File* file, const vector<pair<DIST::Array*, cons
}
}
vector<SgExpression*> tieList;
-if (mpiProgram)
+if (sharedMemoryParallelization)
tieList = compliteTieList(currLoop, loopsTie, arrayLinksByFuncCalls, byUseInFunc, file, lineRange, onlyFor);
else if (onlyFor.size()) // not MPI regime
tieList = compliteTieList(currLoop, loopsTie, arrayLinksByFuncCalls, byUseInFunc, file, lineRange, onlyFor);
@@ -807,7 +807,7 @@ ParallelDirective::genDirective(File* file, const vector<pair<DIST::Array*, cons
}
}
-if (shadowRenew.size() != 0 && mpiProgram == 0)
+if (shadowRenew.size() != 0 && sharedMemoryParallelization == 0)
{
if (shadowRenewShifts.size() == 0)
{
@@ -976,7 +976,7 @@ ParallelDirective::genDirective(File* file, const vector<pair<DIST::Array*, cons
dirStatement[1] = new Expression(expr);
}
-if (remoteAccess.size() != 0 && mpiProgram == 0)
+if (remoteAccess.size() != 0 && sharedMemoryParallelization == 0)
{
if (dirStatement[1] != NULL)
{

View File

@@ -11,7 +11,7 @@
#include "../Utils/types.h"
#include "../Utils/utils.h"
-extern int mpiProgram;
+extern int sharedMemoryParallelization;
struct LoopGraph;

View File

@@ -111,7 +111,7 @@ ParallelDirective* operator+(const ParallelDirective &left, const ParallelDirect
checkNull(second, convertFileName(__FILE__).c_str(), __LINE__);
bool condition = first->arrayRef == second->arrayRef;
-if (mpiProgram)
+if (sharedMemoryParallelization)
condition = !hasConflictUniteOnRules(first->on, second->on) && !hasConflictUniteOnRules(first->on2, second->on2);
if (condition)
@@ -279,7 +279,7 @@ static inline string calculateShifts(DIST::GraphCSR<int, double, attrType> &redu
set<DIST::Array*> refs;
getRealArrayRefs(calcForArray, calcForArray, refs, arrayLinksByFuncCalls);
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{//TODO: need to correct errors
/*for (auto& array : refs)
{
@@ -501,7 +501,7 @@ string ParallelDirective::genBounds(pair<pair<string, string>, vector<pair<int,
auto on_ext = on;
//replace to template align ::on
-if (arrayRef->IsTemplate() == false && mpiProgram == 0)
+if (arrayRef->IsTemplate() == false && sharedMemoryParallelization == 0)
{
vector<tuple<DIST::Array*, int, pair<int, int>>> ruleForRef =
getAlignRuleWithTemplate(arrayRef, arrayLinksByFuncCalls, reducedG, allArrays, regionId);

View File

@@ -1166,7 +1166,7 @@ void DvmhRegionInserter::removePrivatesFromParallelLoops()
if (lexPrev->variant() == DVM_PARALLEL_ON_DIR)
{
-if (mpiProgram == 1)
+if (sharedMemoryParallelization == 1)
lexPrev->deleteStmt();
else
{
@@ -1309,7 +1309,7 @@ void insertDvmhRegions(SgProject& project, int files, const vector<ParallelRegio
const map<DIST::Array*, set<DIST::Array*>> arrayLinksByFuncCalls)
{
vector<DvmhRegionInserter*> inserters;
-const bool regionCondition = ((parallelRegions.size() == 0 && parallelRegions[0]->GetName() == "DEFAULT") || mpiProgram == 1);
+const bool regionCondition = ((parallelRegions.size() == 0 && parallelRegions[0]->GetName() == "DEFAULT") || sharedMemoryParallelization == 1);
set<DIST::Array*> usedArraysInRegions;
set<DIST::Array*> usedWriteArraysInRegions;
@@ -1328,7 +1328,7 @@ void insertDvmhRegions(SgProject& project, int files, const vector<ParallelRegio
for (auto& loop : loopsForFile)
loop->analyzeParallelDirs();
-DvmhRegionInserter* regionInserter = new DvmhRegionInserter(file, loopsForFile, rw_analyzer, arrayLinksByFuncCalls, mapOfFuncs, funcsForFile, mpiProgram == 1);
+DvmhRegionInserter* regionInserter = new DvmhRegionInserter(file, loopsForFile, rw_analyzer, arrayLinksByFuncCalls, mapOfFuncs, funcsForFile, sharedMemoryParallelization == 1);
inserters.push_back(regionInserter);
//collect info about <parallel> functions

View File

@@ -849,12 +849,12 @@ void excludeArraysFromDistribution(const map<DIST::Array*, set<DIST::Array*>>& a
vector<ParallelRegion*> parallelRegions,
map<string, vector<Messages>>& SPF_messages,
map<tuple<int, string, string>, DIST::Array*>& createdArrays,
-int mpiProgram)
+int sharedMemoryParallelization)
{
checkArraysMapping(loopGraph, SPF_messages, arrayLinksByFuncCalls);
propagateArrayFlags(arrayLinksByFuncCalls, declaredArrays, SPF_messages);
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{
for (int z = 0; z < parallelRegions.size(); ++z)
filterArrayInCSRGraph(loopGraph, allFuncInfo, parallelRegions[z], arrayLinksByFuncCalls, SPF_messages);

View File

@@ -34,7 +34,7 @@ void createMapOfFunc(const std::vector<FuncInfo*> &allFuncInfo, std::map<std::pa
FuncInfo* getFuncInfo(const std::map<std::string, FuncInfo*> &funcMap, const std::string &funcName);
void updateFuncInfo(const std::map<std::string, std::vector<FuncInfo*>> &allFuncInfo);
-void excludeArraysFromDistribution(const std::map<DIST::Array*, std::set<DIST::Array*>>& arrayLinksByFuncCalls, const std::map<std::tuple<int, std::string, std::string>, std::pair<DIST::Array*, DIST::ArrayAccessInfo*>> declaredArrays, std::map<std::string, std::vector<LoopGraph*>>& loopGraph, std::vector<ParallelRegion*> parallelRegions, std::map<std::string, std::vector<Messages>>& SPF_messages, std::map<std::tuple<int, std::string, std::string>, DIST::Array*>& createdArrays, int mpiProgram = 0);
+void excludeArraysFromDistribution(const std::map<DIST::Array*, std::set<DIST::Array*>>& arrayLinksByFuncCalls, const std::map<std::tuple<int, std::string, std::string>, std::pair<DIST::Array*, DIST::ArrayAccessInfo*>> declaredArrays, std::map<std::string, std::vector<LoopGraph*>>& loopGraph, std::vector<ParallelRegion*> parallelRegions, std::map<std::string, std::vector<Messages>>& SPF_messages, std::map<std::tuple<int, std::string, std::string>, DIST::Array*>& createdArrays, int sharedMemoryParallelization = 0);
#if __SPF
void functionAnalyzer(SgFile *file, std::map<std::string, std::vector<FuncInfo*>> &allFuncInfo, const std::vector<LoopGraph*> &loops, std::vector<Messages> &messagesForFile, std::map<FuncInfo*, std::vector<SAPFOR::BasicBlock*>>& fullIR);

View File

@@ -423,7 +423,7 @@ static bool hasNonRect(SgForStmt *st, const vector<LoopGraph*> &parentLoops, vec
SgExpression* end = st->end();
SgExpression* step = st->step();
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{
set<DIST::Array*> usedArrays;
@@ -609,7 +609,7 @@ static bool hasSubstringRef(SgStatement* loop)
return false;
}
-void loopGraphAnalyzer(SgFile *file, vector<LoopGraph*> &loopGraph, const vector<SpfInterval*> &intervalTree, vector<Messages> &messages, int mpiProgram)
+void loopGraphAnalyzer(SgFile *file, vector<LoopGraph*> &loopGraph, const vector<SpfInterval*> &intervalTree, vector<Messages> &messages, int sharedMemoryParallelization)
{
map<int, SpfInterval*> mapIntervals;
createMapOfinterval(mapIntervals, intervalTree);

View File

@@ -327,7 +327,7 @@ public:
bool isArrayTemplatesTheSame(DIST::Array*& sameTemplate, const uint64_t regId, const std::map<DIST::Array*, std::set<DIST::Array*>>& arrayLinksByFuncCalls)
{
-if (mpiProgram != 0)
+if (sharedMemoryParallelization != 0)
return true;
std::set<DIST::Array*> usedForRegAccess;

View File

@@ -567,12 +567,12 @@ void addToDistributionGraph(const map<LoopGraph*, map<DIST::Array*, ArrayInfo*>>
getRealArrayRefs(access.first, access.first, realArrayRefs[access.first], arrayLinksByFuncCalls);
bool has_Wr_edges = false, has_Ww_edges = false, has_Rr_edges = false;
-has_Wr_edges = processLinks(currAccessesV, allArrays, realArrayRefs, mpiProgram == 0 ? G : loopGraph, WW_link);
-has_Ww_edges |= processLinks(currAccessesV, allArrays, realArrayRefs, mpiProgram == 0 ? G : loopGraph, WR_link);
+has_Wr_edges = processLinks(currAccessesV, allArrays, realArrayRefs, sharedMemoryParallelization == 0 ? G : loopGraph, WW_link);
+has_Ww_edges |= processLinks(currAccessesV, allArrays, realArrayRefs, sharedMemoryParallelization == 0 ? G : loopGraph, WR_link);
if (!has_Wr_edges && !has_Ww_edges)
-has_Rr_edges = processLinks(currAccessesV, allArrays, realArrayRefs, mpiProgram == 0 ? G : loopGraph, RR_link);
+has_Rr_edges = processLinks(currAccessesV, allArrays, realArrayRefs, sharedMemoryParallelization == 0 ? G : loopGraph, RR_link);
-if (mpiProgram)
+if (sharedMemoryParallelization)
{
if (!has_Wr_edges && !has_Ww_edges && !has_Rr_edges)
for (auto& elem : realArrayRefs)

View File

@@ -7,7 +7,7 @@
struct SpfInterval;
-void loopGraphAnalyzer(SgFile *file, std::vector<LoopGraph*> &loopGraph, const std::vector<SpfInterval*> &statisticTimes, std::vector<Messages> &messages, int mpiProgram);
+void loopGraphAnalyzer(SgFile *file, std::vector<LoopGraph*> &loopGraph, const std::vector<SpfInterval*> &statisticTimes, std::vector<Messages> &messages, int sharedMemoryParallelization);
void findAllRefsToLables(SgStatement *st, std::map<int, std::vector<int>> &labelsRef, bool includeWrite = true);
std::map<LoopGraph*, ParallelDirective*> findAllDirectives(SgFile *file, const std::vector<LoopGraph*> &loops, const uint64_t regId);
std::vector<std::tuple<DIST::Array*, std::vector<long>, std::pair<std::string, int>>> findAllSingleRemotes(SgFile *file, const uint64_t regId, std::vector<ParallelRegion*> &regions);

View File

@@ -335,7 +335,7 @@ vector<int> matchSubscriptToLoopSymbols(const vector<SgForStmt*> &parentLoops, S
addInfoToMap(loopInfo, parentLoops[position], currOrigArrayS, arrayRef, dimNum, REMOTE_FALSE, currLine, numOfSubscriptions);
}
-if (coefs.first < 0 && mpiProgram == 0)
+if (coefs.first < 0 && sharedMemoryParallelization == 0)
{
if (currRegime == DATA_DISTR)
{
@@ -583,7 +583,7 @@ static void findArrayRef(const vector<SgForStmt*> &parentLoops, SgExpression *cu
if (itLoop->second->perfectLoop != depth)
break;
itLoop->second->hasIndirectAccess = true;
-if (mpiProgram && side == RIGHT)
+if (sharedMemoryParallelization && side == RIGHT)
itLoop->second->hasIndirectAccess = false;
}
mapArrayRef(currentSt, currExp, parentLoops, side, lineNum, loopInfo, sortedLoopGraph,
@@ -654,7 +654,7 @@ static void findArrayRef(const vector<SgForStmt*> &parentLoops, SgExpression *cu
const string key = string(OriginalSymbol(currExp->symbol())->identifier());
if (loopsPrivates.find(key) == loopsPrivates.end())
{
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{
for (auto& loop : parentLoops)
{
@@ -687,7 +687,7 @@ static void findArrayRef(const vector<SgForStmt*> &parentLoops, SgExpression *cu
if (wasMapped)
{
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{
int z = 0;
for (auto& loop : parentLoops)
@@ -2257,7 +2257,7 @@ void loopAnalyzer(SgFile *file, vector<ParallelRegion*> &regions, map<tuple<int,
s = start->expr(0)->lhs()->symbol();
if (s && privates.find(s->identifier()) == privates.end())
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
hasWritesToArray = true;
}
}
@@ -2776,7 +2776,7 @@ static void findArrayRefs(SgExpression *ex, SgStatement *st, string fName, int p
new DIST::Array(getShortName(uniqKey), symb->identifier(), ((SgArrayType*)(symb->type()))->dimension(),
getUniqArrayId(), decl->fileName(), decl->lineNumber(), arrayLocation, new Symbol(symb),
findOmpThreadPrivDecl(scope, ompThreadPrivate, symb), false, false,
-inRegion, typeSize, mpiProgram ? DIST::NO_DISTR : DIST::DISTR);
+inRegion, typeSize, sharedMemoryParallelization ? DIST::NO_DISTR : DIST::DISTR);
itNew = declaredArrays.insert(itNew, make_pair(uniqKey, make_pair(arrayToAdd, new DIST::ArrayAccessInfo())));

View File

@@ -50,7 +50,7 @@ extern int passDone;
extern REGIME currRegime;
extern std::vector<Messages>* currMessages;
-extern int mpiProgram;
+extern int sharedMemoryParallelization;
extern int ignoreIO;
extern int parallizeFreeLoops;

View File

@@ -1251,7 +1251,7 @@ void fillUsedArraysInExp(const pair<Statement*, Statement*> &interval, const int
bool checkRegionsResolving(const vector<ParallelRegion*> &regions,
const map<string, vector<FuncInfo*>> &allFuncInfo,
const map<string, CommonBlock*> &commonBlocks,
-map<string, vector<Messages>> &SPF_messages, bool mpiProgram)
+map<string, vector<Messages>> &SPF_messages, bool sharedMemoryParallelization)
{
bool error = false;
@@ -1319,7 +1319,7 @@ bool checkRegionsResolving(const vector<ParallelRegion*> &regions,
}
}
-if (mpiProgram)
+if (sharedMemoryParallelization)
return error;
// check local arrays
@@ -1611,7 +1611,7 @@ static void compliteUseOnlyList(SgStatement *func, const string &location, const
}
int resolveParRegions(vector<ParallelRegion*> &regions, const map<string, vector<FuncInfo*>> &allFuncInfo,
-map<string, vector<Messages>> &SPF_messages, bool mpiProgram,
+map<string, vector<Messages>> &SPF_messages, bool sharedMemoryParallelization,
map<string, map<int, set<string>>> &newDeclsToInclude)
{
bool error = false;
@@ -1619,7 +1619,7 @@ int resolveParRegions(vector<ParallelRegion*> &regions, const map<string, vector
map<string, FuncInfo*> funcMap;
createMapOfFunc(allFuncInfo, funcMap);
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{
map<string, map<int, set<string>>> copied;
@@ -1905,7 +1905,7 @@ int resolveParRegions(vector<ParallelRegion*> &regions, const map<string, vector
}
}
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{
__spf_print(1, "insert DVM intervals\n");

View File

@@ -11,6 +11,6 @@ void fillRegionFunctions(std::vector<ParallelRegion*> &regions, const std::map<s
bool checkRegions(const std::vector<ParallelRegion*> &regions, const std::map<std::string, std::vector<FuncInfo*>> &allFuncInfo, std::map<std::string, std::vector<Messages>> &SPF_messages);
int printCheckRegions(const char *fileName, const std::vector<ParallelRegion*> &regions, const std::map<std::string, std::vector<FuncInfo*>> &allFuncInfo);
-bool checkRegionsResolving(const std::vector<ParallelRegion*> &regions, const std::map<std::string, std::vector<FuncInfo*>> &allFuncInfo, const std::map<std::string, CommonBlock*> &commonBlocks, std::map<std::string, std::vector<Messages>> &SPF_messages, bool mpiProgram);
-int resolveParRegions(std::vector<ParallelRegion*>& regions, const std::map<std::string, std::vector<FuncInfo*>>& allFuncInfo, std::map<std::string, std::vector<Messages>>& SPF_messages, bool mpiProgram, std::map<std::string, std::map<int, std::set<std::string>>>& copyDecls);
+bool checkRegionsResolving(const std::vector<ParallelRegion*> &regions, const std::map<std::string, std::vector<FuncInfo*>> &allFuncInfo, const std::map<std::string, CommonBlock*> &commonBlocks, std::map<std::string, std::vector<Messages>> &SPF_messages, bool sharedMemoryParallelization);
+int resolveParRegions(std::vector<ParallelRegion*>& regions, const std::map<std::string, std::vector<FuncInfo*>>& allFuncInfo, std::map<std::string, std::vector<Messages>>& SPF_messages, bool sharedMemoryParallelization, std::map<std::string, std::map<int, std::set<std::string>>>& copyDecls);
void insertRealignsBeforeFragments(ParallelRegion* reg, SgFile* file, const std::set<DIST::Array*>& distrArrays, const std::map<DIST::Array*, std::set<DIST::Array*>>& arrayLinksByFuncCalls);

View File

@@ -506,7 +506,7 @@ static bool runAnalysis(SgProject &project, const int curr_regime, const bool ne
insertIntrinsicStat(getObjectForFileFromMap(file_name, allFuncInfo));
}
else if (curr_regime == LOOP_GRAPH)
-loopGraphAnalyzer(file, getObjectForFileFromMap(file_name, loopGraph), getObjectForFileFromMap(file_name, intervals), getObjectForFileFromMap(file_name, SPF_messages), mpiProgram);
+loopGraphAnalyzer(file, getObjectForFileFromMap(file_name, loopGraph), getObjectForFileFromMap(file_name, intervals), getObjectForFileFromMap(file_name, SPF_messages), sharedMemoryParallelization);
else if (curr_regime == VERIFY_ENDDO)
{
bool res = EndDoLoopChecker(file, getObjectForFileFromMap(file_name, SPF_messages));
@@ -1076,7 +1076,7 @@ static bool runAnalysis(SgProject &project, const int curr_regime, const bool ne
if (curr_regime == ONLY_ARRAY_GRAPH)
keepFiles = 1;
-if (mpiProgram)
+if (sharedMemoryParallelization)
{
for (auto& byFile : loopGraph)
for (auto& loop : byFile.second)
@@ -1387,7 +1387,7 @@ static bool runAnalysis(SgProject &project, const int curr_regime, const bool ne
vector<string> result;
set<DIST::Array*> arraysDone;
-if (mpiProgram)
+if (sharedMemoryParallelization)
{
bool wasDone = false;
for (int z = 0; z < parallelRegions.size(); ++z)
@@ -1411,7 +1411,7 @@ static bool runAnalysis(SgProject &project, const int curr_regime, const bool ne
{
for (auto& byFile : loopGraph)
for (auto& loop : byFile.second)
-loop->createVirtualTemplateLinks(arrayLinksByFuncCalls, SPF_messages, mpiProgram > 0);
+loop->createVirtualTemplateLinks(arrayLinksByFuncCalls, SPF_messages, sharedMemoryParallelization > 0);
//add dummy array
DataDirective& dataDirectives = parallelRegions[0]->GetDataDirToModify();
@@ -1456,7 +1456,7 @@ static bool runAnalysis(SgProject &project, const int curr_regime, const bool ne
//recalculate array sizes after expression substitution
recalculateArraySizes(arraysDone, allArrays.GetArrays(), arrayLinksByFuncCalls, allFuncInfo);
-createDistributionDirs(reducedG, allArrays, dataDirectives, SPF_messages, arrayLinksByFuncCalls, mpiProgram > 0);
+createDistributionDirs(reducedG, allArrays, dataDirectives, SPF_messages, arrayLinksByFuncCalls, sharedMemoryParallelization > 0);
ALGORITHMS_DONE[CREATE_DISTIBUTION][z] = 1;
}
@@ -1574,7 +1574,7 @@ static bool runAnalysis(SgProject &project, const int curr_regime, const bool ne
}
else if (curr_regime == RESOLVE_PAR_REGIONS)
{
-bool error = resolveParRegions(parallelRegions, allFuncInfo, SPF_messages, mpiProgram, newCopyDeclToIncl);
+bool error = resolveParRegions(parallelRegions, allFuncInfo, SPF_messages, sharedMemoryParallelization, newCopyDeclToIncl);
if (error)
internalExit = 1;
}
@@ -1610,7 +1610,7 @@ static bool runAnalysis(SgProject &project, const int curr_regime, const bool ne
else if (curr_regime == INSERT_PARALLEL_DIRS || curr_regime == EXTRACT_PARALLEL_DIRS)
{
bool cond = (folderName != NULL) || (consoleMode) || (!consoleMode && curr_regime == EXTRACT_PARALLEL_DIRS);
-if (cond && mpiProgram == 0)
+if (cond && sharedMemoryParallelization == 0)
{
//insert template declaration to main program
const bool extract = (curr_regime == EXTRACT_PARALLEL_DIRS);
@@ -1659,7 +1659,7 @@ static bool runAnalysis(SgProject &project, const int curr_regime, const bool ne
printDefUseSets("_defUseList.txt", defUseByFunctions);
}
else if (curr_regime == LOOP_ANALYZER_DATA_DIST_S0)
-excludeArraysFromDistribution(arrayLinksByFuncCalls, declaredArrays, loopGraph, parallelRegions, SPF_messages, createdArrays, mpiProgram);
+excludeArraysFromDistribution(arrayLinksByFuncCalls, declaredArrays, loopGraph, parallelRegions, SPF_messages, createdArrays, sharedMemoryParallelization);
else if (curr_regime == LOOP_ANALYZER_DATA_DIST_S1)
{
for (int z = 0; z < parallelRegions.size(); ++z)
@@ -1671,7 +1671,7 @@ static bool runAnalysis(SgProject &project, const int curr_regime, const bool ne
}
else if (curr_regime == PRINT_PAR_REGIONS_ERRORS)
{
-bool error = checkRegionsResolving(parallelRegions, allFuncInfo, commonBlocks, SPF_messages, mpiProgram);
+bool error = checkRegionsResolving(parallelRegions, allFuncInfo, commonBlocks, SPF_messages, sharedMemoryParallelization);
if (error)
internalExit = 1;
}
@@ -2135,7 +2135,7 @@ void runPass(const int curr_regime, const char *proj_name, const char *folderNam
case INSERT_PARALLEL_DIRS_NODIST:
{
-mpiProgram = 1;
+sharedMemoryParallelization = 1;
string additionalName = (consoleMode && folderName == NULL) ? "__shared" : "";
@@ -2168,7 +2168,7 @@ void runPass(const int curr_regime, const char *proj_name, const char *folderNam
int maxDimsIdxReg = -1;
int lastI = 1;
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
lastI = countMaxValuesForParallelVariants(maxDims, maxDimsIdx, maxDimsIdxReg, currentVariants);
if (genAllVars == 0)
lastI = 1;
@@ -2176,7 +2176,7 @@ void runPass(const int curr_regime, const char *proj_name, const char *folderNam
for (int i = 0; i < lastI; ++i)
{
//if specific variant number is requested, skip all others
-if (genSpecificVar >= 0 && i != genSpecificVar && mpiProgram == 0)
+if (genSpecificVar >= 0 && i != genSpecificVar && sharedMemoryParallelization == 0)
continue;
string tmpFolder = "";
@@ -2203,7 +2203,7 @@ void runPass(const int curr_regime, const char *proj_name, const char *folderNam
runAnalysis(*project, INSERT_PARALLEL_DIRS, false, consoleMode ? additionalName.c_str() : NULL, folderName);
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{
runPass(CREATE_REMOTES, proj_name, folderName);
runPass(REMOVE_AND_CALC_SHADOW, proj_name, folderName);
@@ -2214,11 +2214,11 @@ void runPass(const int curr_regime, const char *proj_name, const char *folderNam
runPass(RESTORE_LOOP_FROM_ASSIGN, proj_name, folderName);
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
runPass(ADD_TEMPL_TO_USE_ONLY, proj_name, folderName);
runAnalysis(*project, INSERT_REGIONS, false);
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
runPass(GROUP_ACTUAL_AND_REMOTE, proj_name, folderName);
runAnalysis(*project, CALCULATE_STATS_SCHEME, false);
@@ -2242,7 +2242,7 @@ void runPass(const int curr_regime, const char *proj_name, const char *folderNam
runPass(REVERSE_CREATED_NESTED_LOOPS, proj_name, folderName);
runPass(CLEAR_SPF_DIRS, proj_name, folderName);
runPass(RESTORE_LOOP_FROM_ASSIGN_BACK, proj_name, folderName);
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
runPass(GROUP_ACTUAL_AND_REMOTE_RESTORE, proj_name, folderName);
//clear shadow grouping
@@ -2567,7 +2567,7 @@ int main(int argc, char **argv)
else if (string(curr_arg) == "-fdvm")
convertFiles(argc - i, argv + i);
else if (string(curr_arg) == "-mpi") {
-mpiProgram = 1;
+sharedMemoryParallelization = 1;
ignoreArrayDistributeState = true;
}
else if (string(curr_arg) == "-client")
@@ -2622,7 +2622,7 @@ int main(int argc, char **argv)
}
}
-if (mpiProgram == 1)
+if (sharedMemoryParallelization == 1)
{
keepDvmDirectives = 0;
ignoreIO = 1;
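Note that every site enabling the regime also forces the related options: the -mpi branch above sets keepDvmDirectives = 0 and ignoreIO = 1; the same coupling appears in createProject (below), where detected MPI calls additionally clear parallizeFreeLoops; and setOptions (further below) derives all three options from the flag.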

View File

@@ -9,7 +9,7 @@ extern int automaticDeprecateArrays;
extern int maxShadowWidth;
extern int langOfMessages;
extern bool removeNestedIntervals;
-extern int mpiProgram;
+extern int sharedMemoryParallelization;
extern int parallizeFreeLoops;
extern int ignoreIO;
extern int parseForInlining;

View File

@@ -35,7 +35,7 @@ int intervals_threshold = 100; // threshold for intervals
bool removeNestedIntervals = false; // nested intervals removal flag
int langOfMessages = 1; // 0 - ENG, 1 - RUS
int parallizeFreeLoops = 0; // parallize free calculations
-int mpiProgram = 0; // detected mpi calls
+int sharedMemoryParallelization = 0; // shared-memory regime; set when MPI calls are detected
int ignoreIO = 0; // ignore io checker for arrays (DVM IO limitations)
int parseForInlining = 0; // special regime for files parsing for inliner
int dumpIR = 0; // allow dump IR after BUILD_IR pass

View File

@@ -3344,7 +3344,7 @@ SgProject* createProject(const char* proj_name,
if (detectMpiCalls(project, SPF_messages))
{
-mpiProgram = 1;
+sharedMemoryParallelization = 1;
keepDvmDirectives = 0;
ignoreIO = 1;
parallizeFreeLoops = 0;

View File

@@ -203,7 +203,7 @@ const string printVersionAsFortranComm()
ret += "! *** consider DVMH directives\n";
if (keepSpfDirs)
ret += "! *** save SPF directives\n";
-if (mpiProgram)
+if (sharedMemoryParallelization)
ret += "! *** MPI program regime (shared memory parallelization)\n";
if (ignoreIO)
ret += "! *** ignore I/O checker for arrays (DVM I/O limitations)\n";

View File

@@ -166,10 +166,10 @@ static void setOptions(const short* options, bool isBuildParallel = false, const
removeNestedIntervals = (intOptions[KEEP_LOOPS_CLOSE_NESTING] == 1);
showDebug = (intOptions[DEBUG_PRINT_ON] == 1);
-mpiProgram = (mpiProgram != 1) ? intOptions[MPI_PROGRAM] : mpiProgram;
-parallizeFreeLoops = (mpiProgram == 1) ? 0 : intOptions[PARALLIZE_FREE_LOOPS];
-ignoreIO = (mpiProgram == 1) ? 1 : intOptions[IGNORE_IO_SAPFOR];
-keepDvmDirectives = (mpiProgram == 1) ? 0 : intOptions[KEEP_DVM_DIRECTIVES];
+sharedMemoryParallelization = (sharedMemoryParallelization != 1) ? intOptions[MPI_PROGRAM] : sharedMemoryParallelization;
+parallizeFreeLoops = (sharedMemoryParallelization == 1) ? 0 : intOptions[PARALLIZE_FREE_LOOPS];
+ignoreIO = (sharedMemoryParallelization == 1) ? 1 : intOptions[IGNORE_IO_SAPFOR];
+keepDvmDirectives = (sharedMemoryParallelization == 1) ? 0 : intOptions[KEEP_DVM_DIRECTIVES];
parseForInlining = intOptions[PARSE_FOR_INLINE];
@@ -819,7 +819,7 @@ int SPF_GetArrayDistribution(void*& context, int winHandler, short *options, sho
runPassesForVisualizer(projName, { CREATE_TEMPLATE_LINKS });
else if (regime == 1)
{
-if (mpiProgram)
+if (sharedMemoryParallelization)
runPassesForVisualizer(projName, { SELECT_ARRAY_DIM_CONF });
else
runPassesForVisualizer(projName, { LOOP_ANALYZER_DATA_DIST_S1 });
@@ -1061,7 +1061,7 @@ int SPF_CreateParallelVariant(void*& context, int winHandler, short *options, sh
throw (-5);
int countOfDist = 0;
-if (mpiProgram == 0)
+if (sharedMemoryParallelization == 0)
{
map<uint64_t, vector<pair<int64_t, int64_t>>> varLens;
for (int i = 0, k = 0; i < *varLen; i += 3, ++k)
@@ -1892,7 +1892,7 @@ int SPF_SharedMemoryParallelization(void*& context, int winHandler, short* optio
MessageManager::clearCache();
MessageManager::setWinHandler(winHandler);
ignoreArrayDistributeState = true;
-mpiProgram = 1;
+sharedMemoryParallelization = 1;
return simpleTransformPass(INSERT_PARALLEL_DIRS_NODIST, options, projName, folderName, output, outputSize, outputMessage, outputMessageSize, true);
}