projects
/
qwerkisync
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Added pre and post processing "event handlers" to the DB backends. Initially u
[qwerkisync]
/
SyncerThread.cpp
diff --git
a/SyncerThread.cpp
b/SyncerThread.cpp
index
55fce3c
..
2a5e3b1
100644
(file)
--- a/
SyncerThread.cpp
+++ b/
SyncerThread.cpp
@@
-28,7
+28,6
@@
#include "EventTypes/EventFromFileList.h"
#include "EventTypes/iEvent.h"
#include "EventLogBackupManager.h"
#include "EventTypes/EventFromFileList.h"
#include "EventTypes/iEvent.h"
#include "EventLogBackupManager.h"
-#include "EventLogReindexer.h"
#include "EventParsers/Factory.h"
#include "EventParsers/iEventParser.h"
#include "EventParsers/Factory.h"
#include "EventParsers/iEventParser.h"
@@
-37,6
+36,7
@@
#include <QFile>
#include <QFileInfo>
#include <QProcess>
#include <QFile>
#include <QFileInfo>
#include <QProcess>
+#include <QSharedPointer>
#include <stdexcept>
#include <stdexcept>
@@
-187,7
+187,8
@@
void SyncerThread::run()
qDebug() << "Importing new events";
qDebug() << "Importing new events";
- // Re-parse the new events
+ // Re-parse the new events and insert them
+ allBackends.PreInsert();
{
int idx = 0;
foreach(QString filename, newHashesByPath.keys())
{
int idx = 0;
foreach(QString filename, newHashesByPath.keys())
@@
-196,8
+197,6
@@
void SyncerThread::run()
foreach(iHashable::Hash newHash, newHashesByPath.value(filename))
recordsToReturn.append(pathsByHashes.value(newHash).second);
foreach(iHashable::Hash newHash, newHashesByPath.value(filename))
recordsToReturn.append(pathsByHashes.value(newHash).second);
- ++idx;
-
// Repeating an action that caused an exception last time
// shouldn't happen again, but just in case...
try
// Repeating an action that caused an exception last time
// shouldn't happen again, but just in case...
try
@@
-213,6
+212,8
@@
void SyncerThread::run()
{
qDebug() << "Unable to insert event: " << exception.what();
}
{
qDebug() << "Unable to insert event: " << exception.what();
}
+
+ emit EventProcessed(++idx, newHashes.count());
}
}
catch(const std::runtime_error &exception)
}
}
catch(const std::runtime_error &exception)
@@
-220,14
+221,12
@@
void SyncerThread::run()
qDebug() << exception.what() << endl;
}
qDebug() << exception.what() << endl;
}
+ // Just to make sure the listeners are synced even if the
+ // earlier call is skipped due to errors...
emit EventProcessed(idx, newHashes.count());
}
}
emit EventProcessed(idx, newHashes.count());
}
}
-
- // Reorder the DB IDs as Nokia are guilty of both premature
- // optimisation as well as closed source UIs...
- EventLogReindexer reindexer;
- reindexer.Reindex();
+ allBackends.PostInsert(); // Perform any post-insert cleanup (i.e. reindexing)
// Need to find a better way of refreshing the conversations view...
QProcess::execute("pkill rtcom");
// Need to find a better way of refreshing the conversations view...
QProcess::execute("pkill rtcom");