X-Git-Url: https://vcs.maemo.org/git/?a=blobdiff_plain;f=SyncerThread.cpp;h=e01cb92fac0742e780f8371fa30235b175f2d4b4;hb=43c287cf53b85a8a347ae12ce8d938b256357ce3;hp=55fce3cd266344d970a6db4629b79e2cc859b573;hpb=4abcc9ab77d80562371024c243eb6b4f9f28dfcc;p=qwerkisync

diff --git a/SyncerThread.cpp b/SyncerThread.cpp
index 55fce3c..e01cb92 100644
--- a/SyncerThread.cpp
+++ b/SyncerThread.cpp
@@ -28,7 +28,6 @@
 #include "EventTypes/EventFromFileList.h"
 #include "EventTypes/iEvent.h"
 #include "EventLogBackupManager.h"
-#include "EventLogReindexer.h"
 #include "EventParsers/Factory.h"
 #include "EventParsers/iEventParser.h"
 
@@ -37,6 +36,7 @@
 #include
 #include
 #include
+#include
 
 #include
 
@@ -92,9 +92,11 @@ void SyncerThread::run()
 		QDir().rmpath(CurrentSettings().Directory());
 
 		DBBackends::AllBackends allBackends(CurrentSettings());
-		EventProcessors::Writer eventWriter(CurrentSettings());
+		NumberToNameLookup lookup; // Prepare the telephone-address book ID lookup.
+		EventProcessors::Writer eventWriter(CurrentSettings(), lookup);
 		QObject::connect(&eventWriter, SIGNAL(EventProcessed(int,int)), this, SIGNAL(EventProcessed(int,int)));
 		allBackends.Process(eventWriter);
+		QObject::disconnect(&eventWriter, SIGNAL(EventProcessed(int,int)), this, SIGNAL(EventProcessed(int,int)));
 	}
 	else
 	{
@@ -167,7 +169,7 @@ void SyncerThread::run()
 			foreach(iHashable::Hash hash, newHashes)
 				qDebug() << hash << endl;
 
-			// Now an optimisation: group the new hases by the files they come
+			// Now an optimisation: group the new hashes by the files they come
 			// from. This enables each file to only be parsed once and return
 			// all the required events from it.
 			QHash > newHashesByPath;
@@ -187,7 +189,8 @@ void SyncerThread::run()
 
 			qDebug() << "Importing new events";
 
-			// Re-parse the new events
+			// Re-parse the new events and insert them
+			allBackends.PreInsert();
 			{
 				int idx = 0;
 				foreach(QString filename, newHashesByPath.keys())
@@ -196,8 +199,6 @@ void SyncerThread::run()
 					foreach(iHashable::Hash newHash, newHashesByPath.value(filename))
 						recordsToReturn.append(pathsByHashes.value(newHash).second);
 
-					++idx;
-
 					// Repeating an action that caused an exception last time
 					// shouldn't happen again, but just in case...
 					try
@@ -213,6 +214,8 @@ void SyncerThread::run()
 					{
 						qDebug() << "Unable to insert event: " << exception.what();
 					}
+
+					emit EventProcessed(++idx, newHashes.count());
 				}
 			}
 			catch(const std::runtime_error &exception)
@@ -220,14 +223,12 @@ void SyncerThread::run()
 				qDebug() << exception.what() << endl;
 			}
 
+			// Just to make sure the listeners are synced even if the
+			// earlier call is skipped due to errors...
 			emit EventProcessed(idx, newHashes.count());
 		}
 	}
-
-	// Reorder the DB IDs as Nokia are guilty of both premature
-	// optimisation as well as closed source UIs...
-	EventLogReindexer reindexer;
-	reindexer.Reindex();
+	allBackends.PostInsert(); // Perform any post-insert cleanup (i.e. reindexing)
 
 	// Need to find a better way of refreshing the conversations view...
 	QProcess::execute("pkill rtcom");
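
Note on the change (a minimal sketch, not code from this commit): the diff drops the standalone EventLogReindexer step and instead brackets the import with allBackends.PreInsert() / allBackends.PostInsert(), letting each database backend do its own preparation and cleanup (such as the reindexing the old code did explicitly). The real DBBackends::AllBackends and backend interfaces are not shown in this diff, so the names below (iDBBackendSketch, AllBackendsSketch, Add, m_Backends) are hypothetical illustrations of that hook pattern, not the project's actual API.

#include <QList>

// Hypothetical backend interface: PreInsert()/PostInsert() bracket a bulk import.
class iDBBackendSketch
{
public:
	virtual ~iDBBackendSketch() {}
	virtual void PreInsert() {}   // e.g. prepare for a batch of inserts
	virtual void PostInsert() {}  // e.g. reindex/clean up, as EventLogReindexer used to
};

// Hypothetical aggregate that fans the lifecycle calls out to every backend,
// mirroring how the diff calls PreInsert() once before the import loop and
// PostInsert() once after it.
class AllBackendsSketch
{
public:
	void Add(iDBBackendSketch *backend) { m_Backends.append(backend); }

	void PreInsert()
	{
		foreach (iDBBackendSketch *backend, m_Backends)
			backend->PreInsert();
	}

	void PostInsert()
	{
		foreach (iDBBackendSketch *backend, m_Backends)
			backend->PostInsert();
	}

private:
	QList<iDBBackendSketch*> m_Backends;
};

In the same spirit, the added QObject::disconnect after allBackends.Process mirrors the earlier connect, so the writer's EventProcessed(int,int) signal stops driving the thread's progress signal once the restore pass has finished.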