diff --git a/emper/Fibril.hpp b/emper/Fibril.hpp
index 95f7a7219b75f79a40317b3dfc590830e8c0e4e0..7c5cb3fd8bde37c04e48d31a593517e5f788e0c3 100644
--- a/emper/Fibril.hpp
+++ b/emper/Fibril.hpp
@@ -39,7 +39,7 @@ private:
 	uint32_t reserveStealCount = 0;
 
 #ifdef EMPER_MADVISE
-	std::atomic<int> resumable = 0;
+	std::atomic<bool> resumable = false;
 #endif
 
 #endif /* EMPER_LOCKED_FIBRIL */
@@ -100,7 +100,7 @@ private:
 				/* unmap unused stack pages */
 				Context::currentContext->unmap(cont.sp);
 				/* set resumable to 'true' */
-				if (-1 == resumable.exchange(1, std::memory_order_acq_rel)) {
+				if (true == resumable.exchange(true, std::memory_order_acq_rel)) {
 					/* last one joined, but could not resume
 					 * because of us, so we can resume.
 					 * resume, no return */
@@ -115,7 +115,7 @@ private:
 			if (stack != Context::currentContext) {
 #ifdef EMPER_MADVISE
 				/* get resumable, signal we tried to resume */
-				if (0 == resumable.exchange(-1, std::memory_order_acq_rel)) {
+				if (false == resumable.exchange(true, std::memory_order_acq_rel)) {
 					/* stack owner is unmapping pages, we don't
 					 * wait, stack owner sees we were here.
 					 * random steal */
@@ -270,7 +270,7 @@ public:
 
 		reserveStealCount = 0;
 #ifdef EMPER_MADVISE
-		resumable.store(0, std::memory_order_relaxed);
+		resumable.store(false, std::memory_order_relaxed);
 #endif /* EMPER_MADVISE */
 	}
 #endif /* EMPER_LOCKED_FIBRIL */
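
The int flag's three states (0 = initial, 1 = owner finished unmapping, -1 = last joiner tried to resume) collapse cleanly to a bool because only two parties ever touch the flag, and exchange() already tells each side whether the other has passed through. Below is a minimal standalone sketch of that handshake under the new bool scheme; the names (unmapper_path, joiner_path, resume, random_steal) are illustrative placeholders, not EMPER's API, and only the atomic<bool> exchange pattern mirrors the Fibril.hpp code above.

```cpp
// Sketch of the two-party resumable handshake (assumed placeholder names).
#include <atomic>
#include <cstdio>
#include <thread>

std::atomic<bool> resumable{false};  // false per lifecycle, as in the last hunk's store

void resume(const char* who) { std::printf("%s resumes the continuation\n", who); }
void random_steal() { std::printf("joiner does a random steal instead\n"); }

// Stack owner: unmap unused stack pages, then publish "resumable".
// If the joiner already passed through (old value was true), nobody else
// will resume the continuation, so the owner does it itself.
void unmapper_path() {
	/* ... madvise()/unmap of unused stack pages would happen here ... */
	if (resumable.exchange(true, std::memory_order_acq_rel)) {
		resume("unmapper");
	}
}

// Last joiner: signal "I tried to resume". If the owner has not finished
// unmapping yet (old value was false), do not wait; the owner will see the
// flag and resume. Otherwise the owner is done and the joiner resumes.
void joiner_path() {
	if (!resumable.exchange(true, std::memory_order_acq_rel)) {
		random_steal();
	} else {
		resume("joiner");
	}
}

int main() {
	std::thread owner(unmapper_path);
	std::thread joiner(joiner_path);
	owner.join();
	joiner.join();
	// Exactly one of the two paths resumes the continuation for any
	// interleaving: whichever exchange() observes true is the resumer.
}
```

Whichever side's exchange() observes true knows the other side has already been here, so exactly one side resumes the continuation regardless of interleaving; the relaxed store(false) in the final hunk simply rearms the flag for the next lifecycle.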