diff --git a/fs/procfs/fs_procfspressure.c b/fs/procfs/fs_procfspressure.c
index 056cb02806465..11c19a9f7e994 100644
--- a/fs/procfs/fs_procfspressure.c
+++ b/fs/procfs/fs_procfspressure.c
@@ -131,11 +131,11 @@ static int pressure_open(FAR struct file *filep, FAR const char *relpath,
       return -ENOMEM;
     }
 
-  flags = spin_lock_irqsave(&g_pressure_lock);
+  flags = raw_spin_lock_irqsave(&g_pressure_lock);
   priv->interval = CLOCK_MAX;
   filep->f_priv = priv;
   dq_addfirst(&priv->entry, &g_pressure_memory_queue);
-  spin_unlock_irqrestore(&g_pressure_lock, flags);
+  raw_spin_unlock_irqrestore(&g_pressure_lock, flags);
   return OK;
 }
 
@@ -148,9 +148,9 @@ static int pressure_close(FAR struct file *filep)
   FAR struct pressure_file_s *priv = filep->f_priv;
   uint32_t flags;
 
-  flags = spin_lock_irqsave(&g_pressure_lock);
+  flags = raw_spin_lock_irqsave(&g_pressure_lock);
   dq_rem(&priv->entry, &g_pressure_memory_queue);
-  spin_unlock_irqrestore(&g_pressure_lock, flags);
+  raw_spin_unlock_irqrestore(&g_pressure_lock, flags);
   fs_heap_free(priv);
   return OK;
 }
@@ -169,10 +169,10 @@ static ssize_t pressure_read(FAR struct file *filep, FAR char *buffer,
   off_t offset;
   ssize_t ret;
 
-  flags = spin_lock_irqsave(&g_pressure_lock);
+  flags = raw_spin_lock_irqsave(&g_pressure_lock);
   remain = g_remaining;
   largest = g_largest;
-  spin_unlock_irqrestore(&g_pressure_lock, flags);
+  raw_spin_unlock_irqrestore(&g_pressure_lock, flags);
 
   ret = procfs_snprintf(buf, sizeof(buf), "remaining %zu, largest:%zu\n",
                         remain, largest);
@@ -226,14 +226,14 @@ static ssize_t pressure_write(FAR struct file *filep, FAR const char *buffer,
       interval = USEC2TICK(interval);
     }
 
-  flags = spin_lock_irqsave(&g_pressure_lock);
+  flags = raw_spin_lock_irqsave(&g_pressure_lock);
 
   /* We should trigger the first event immediately */
 
   priv->lasttick = CLOCK_MAX;
   priv->threshold = threshold;
   priv->interval = interval;
-  spin_unlock_irqrestore(&g_pressure_lock, flags);
+  raw_spin_unlock_irqrestore(&g_pressure_lock, flags);
   return buflen;
 }
 
@@ -248,7 +248,7 @@ static int pressure_poll(FAR struct file *filep, FAR struct pollfd *fds,
   clock_t current = clock_systime_ticks();
   uint32_t flags;
 
-  flags = spin_lock_irqsave(&g_pressure_lock);
+  flags = raw_spin_lock_irqsave(&g_pressure_lock);
   if (setup)
     {
       if (priv->fds == NULL)
@@ -265,14 +265,14 @@ static int pressure_poll(FAR struct file *filep, FAR struct pollfd *fds,
                  CLOCK_MAX || current - priv->lasttick >= priv->interval))
             {
               priv->lasttick = current;
-              spin_unlock_irqrestore(&g_pressure_lock, flags);
+              raw_spin_unlock_irqrestore(&g_pressure_lock, flags);
               poll_notify(&priv->fds, 1, POLLPRI);
               return OK;
             }
         }
       else
         {
-          spin_unlock_irqrestore(&g_pressure_lock, flags);
+          raw_spin_unlock_irqrestore(&g_pressure_lock, flags);
           return -EBUSY;
         }
     }
@@ -282,7 +282,7 @@ static int pressure_poll(FAR struct file *filep, FAR struct pollfd *fds,
       fds->priv = NULL;
     }
 
-  spin_unlock_irqrestore(&g_pressure_lock, flags);
+  raw_spin_unlock_irqrestore(&g_pressure_lock, flags);
   return OK;
 }
 
@@ -302,12 +302,12 @@ static int pressure_dup(FAR const struct file *oldp, FAR struct file *newp)
       return -ENOMEM;
     }
 
-  flags = spin_lock_irqsave(&g_pressure_lock);
+  flags = raw_spin_lock_irqsave(&g_pressure_lock);
   memcpy(newpriv, oldpriv, sizeof(struct pressure_file_s));
   dq_addfirst(&newpriv->entry, &g_pressure_memory_queue);
   newpriv->fds = NULL;
   newp->f_priv = newpriv;
-  spin_unlock_irqrestore(&g_pressure_lock, flags);
+  raw_spin_unlock_irqrestore(&g_pressure_lock, flags);
   return OK;
 }
 
@@ -425,7 +425,7 @@ void mm_notify_pressure(size_t remaining, size_t largest)
   FAR dq_entry_t *tmp;
   uint32_t flags;
 
-  flags = spin_lock_irqsave(&g_pressure_lock);
+  flags = raw_spin_lock_irqsave(&g_pressure_lock);
   g_remaining = remaining;
   g_largest = largest;
 
@@ -463,11 +463,11 @@ void mm_notify_pressure(size_t remaining, size_t largest)
         }
 
       pressure->lasttick = current;
-      spin_unlock_irqrestore(&g_pressure_lock, flags);
+      raw_spin_unlock_irqrestore(&g_pressure_lock, flags);
       poll_notify(&pressure->fds, 1, POLLPRI);
-      flags = spin_lock_irqsave(&g_pressure_lock);
+      flags = raw_spin_lock_irqsave(&g_pressure_lock);
     }
 
-  spin_unlock_irqrestore(&g_pressure_lock, flags);
+  raw_spin_unlock_irqrestore(&g_pressure_lock, flags);
 }
diff --git a/fs/v9fs/virtio_9p.c b/fs/v9fs/virtio_9p.c
index 8e7cc8fb8d338..85fe57944db45 100644
--- a/fs/v9fs/virtio_9p.c
+++ b/fs/v9fs/virtio_9p.c
@@ -174,7 +174,7 @@ static int virtio_9p_request(FAR struct v9fs_transport_s *transport,
       vb[payload->wcount + i].len = payload->riov[i].iov_len;
     }
 
-  flags = spin_lock_irqsave(&priv->lock);
+  flags = raw_spin_lock_irqsave(&priv->lock);
   ret = virtqueue_add_buffer(vq, vb, payload->wcount,
                              payload->rcount, payload);
   if (ret < 0)
@@ -185,7 +185,7 @@ static int virtio_9p_request(FAR struct v9fs_transport_s *transport,
   virtqueue_kick(vq);
 
 out:
-  spin_unlock_irqrestore(&priv->lock, flags);
+  raw_spin_unlock_irqrestore(&priv->lock, flags);
   return ret;
 }
 
diff --git a/mm/iob/iob_alloc.c b/mm/iob/iob_alloc.c
index a0fb94ec523d9..893ec8cfb26f8 100644
--- a/mm/iob/iob_alloc.c
+++ b/mm/iob/iob_alloc.c
@@ -79,7 +79,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
    * to protect the committed list:  We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Take the I/O buffer from the head of the committed list */
 
@@ -98,7 +98,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
       iob->io_pktlen = 0;    /* Total length of the packet */
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
@@ -173,7 +173,7 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
    * we are waiting for I/O buffers to become free.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Try to get an I/O buffer */
 
@@ -191,7 +191,7 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
           g_iob_count--;
         }
 
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
 
       if (timeout == UINT_MAX)
         {
@@ -217,7 +217,7 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
       return iob;
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
@@ -304,9 +304,9 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
    * to protect the free list:  We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
   iob = iob_tryalloc_internal(throttled);
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
diff --git a/mm/iob/iob_alloc_qentry.c b/mm/iob/iob_alloc_qentry.c
index d4ae2fff13793..3eccdcd2c977d 100644
--- a/mm/iob/iob_alloc_qentry.c
+++ b/mm/iob/iob_alloc_qentry.c
@@ -59,7 +59,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
    * to protect the committed list:  We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Take the I/O buffer from the head of the committed list */
 
@@ -75,7 +75,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
       iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return iobq;
 }
 
@@ -127,7 +127,7 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
    * re-enabled while we are waiting for I/O buffers to become free.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Try to get an I/O buffer chain container. */
 
@@ -139,7 +139,7 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
        */
 
      g_qentry_wait++;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
      ret = nxsem_wait_uninterruptible(&g_qentry_sem);
      if (ret >= 0)
        {
@@ -156,7 +156,7 @@ static FAR struct iob_qentry_s *iob_allocwait_qentry(void)
       return qentry;
     }
 
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return qentry;
 }
 
@@ -212,9 +212,9 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
    * to protect the free list:  We disable interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
   iobq = iob_tryalloc_qentry_internal();
-  spin_unlock_irqrestore(&g_iob_lock, flags);
+  raw_spin_unlock_irqrestore(&g_iob_lock, flags);
   return iobq;
 }
 
diff --git a/mm/iob/iob_free.c b/mm/iob/iob_free.c
index 86af4304d26f3..07b21c1c401b3 100644
--- a/mm/iob/iob_free.c
+++ b/mm/iob/iob_free.c
@@ -132,7 +132,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
    * interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Which list?  If there is a task waiting for an IOB, then put
    * the IOB on either the free list or on the committed list where
@@ -146,7 +146,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       g_iob_count++;
       iob->io_flink   = g_iob_committed;
       g_iob_committed = iob;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
       nxsem_post(&g_iob_sem);
     }
 #if CONFIG_IOB_THROTTLE > 0
@@ -155,7 +155,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       iob->io_flink   = g_iob_committed;
       g_iob_committed = iob;
       g_throttle_wait--;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
       nxsem_post(&g_throttle_sem);
     }
 #endif
@@ -164,7 +164,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       g_iob_count++;
       iob->io_flink  = g_iob_freelist;
       g_iob_freelist = iob;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
     }
 
   DEBUGASSERT(g_iob_count <= CONFIG_IOB_NBUFFERS);
diff --git a/mm/iob/iob_free_qentry.c b/mm/iob/iob_free_qentry.c
index 489cb88449215..6ec91dba686db 100644
--- a/mm/iob/iob_free_qentry.c
+++ b/mm/iob/iob_free_qentry.c
@@ -60,7 +60,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
    * interrupts very briefly.
    */
 
-  flags = spin_lock_irqsave(&g_iob_lock);
+  flags = raw_spin_lock_irqsave(&g_iob_lock);
 
   /* Which list?  If there is a task waiting for an IOB chain, then put
    * the IOB chain on either the free list or on the committed list where
@@ -73,14 +73,14 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
       iobq->qe_flink   = g_iob_qcommitted;
       g_iob_qcommitted = iobq;
       g_qentry_wait--;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
       nxsem_post(&g_qentry_sem);
     }
   else
     {
       iobq->qe_flink  = g_iob_freeqlist;
       g_iob_freeqlist = iobq;
-      spin_unlock_irqrestore(&g_iob_lock, flags);
+      raw_spin_unlock_irqrestore(&g_iob_lock, flags);
     }
 
   /* And return the I/O buffer chain container after the one that was freed */
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index b5b6d3ee56153..21db5d57e3b14 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -165,7 +165,7 @@ static void kasan_set_poison(FAR const void *addr, size_t size,
   mask = KASAN_FIRST_WORD_MASK(bit);
   size /= KASAN_SHADOW_SCALE;
 
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
   while (size >= nbit)
     {
       if (poisoned)
@@ -197,7 +197,7 @@ static void kasan_set_poison(FAR const void *addr, size_t size,
         }
     }
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 }
 
 /****************************************************************************
@@ -231,12 +231,12 @@ void kasan_register(FAR void *addr, FAR size_t *size)
   region->begin = (uintptr_t)addr;
   region->end   = region->begin + *size;
 
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
 
   DEBUGASSERT(g_region_count <= CONFIG_MM_KASAN_REGIONS);
   g_region[g_region_count++] = region;
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 
   kasan_start();
   kasan_poison(addr, *size);
@@ -248,7 +248,7 @@ void kasan_unregister(FAR void *addr)
   irqstate_t flags;
   size_t i;
 
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
   for (i = 0; i < g_region_count; i++)
     {
       if (g_region[i]->begin == (uintptr_t)addr)
@@ -260,5 +260,5 @@ void kasan_unregister(FAR void *addr)
         }
     }
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 }
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index d1ba3bf73a535..fca989db462d2 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -142,14 +142,14 @@ static void kasan_set_poison(FAR const void *addr,
     }
 
   size = KASAN_SHADOW_SIZE(size);
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
 
   while (size--)
     {
       p[size] = value;
     }
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 }
 
 /****************************************************************************
@@ -186,12 +186,12 @@ void kasan_register(FAR void *addr, FAR size_t *size)
   region->begin = (uintptr_t)addr;
   region->end   = region->begin + *size;
 
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
 
   DEBUGASSERT(g_region_count <= CONFIG_MM_KASAN_REGIONS);
   g_region[g_region_count++] = region;
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 
   kasan_start();
   kasan_poison(addr, *size);
@@ -203,7 +203,7 @@ void kasan_unregister(FAR void *addr)
   irqstate_t flags;
   size_t i;
 
-  flags = spin_lock_irqsave(&g_lock);
+  flags = raw_spin_lock_irqsave(&g_lock);
   for (i = 0; i < g_region_count; i++)
     {
       if (g_region[i]->begin == (uintptr_t)addr)
@@ -215,5 +215,5 @@ void kasan_unregister(FAR void *addr)
         }
     }
 
-  spin_unlock_irqrestore(&g_lock, flags);
+  raw_spin_unlock_irqrestore(&g_lock, flags);
 }
diff --git a/mm/mempool/mempool.c b/mm/mempool/mempool.c
index 89b3f80e24f06..f7a411fd5bd85 100644
--- a/mm/mempool/mempool.c
+++ b/mm/mempool/mempool.c
@@ -345,7 +345,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
   irqstate_t flags;
 
 retry:
-  flags = spin_lock_irqsave(&pool->lock);
+  flags = raw_spin_lock_irqsave(&pool->lock);
   blk = mempool_remove_queue(pool, &pool->queue);
   if (blk == NULL)
     {
@@ -354,7 +354,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
       blk = mempool_remove_queue(pool, &pool->iqueue);
       if (blk == NULL)
         {
-          spin_unlock_irqrestore(&pool->lock, flags);
+          raw_spin_unlock_irqrestore(&pool->lock, flags);
           return blk;
         }
     }
@@ -362,7 +362,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
     {
       size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
 
-      spin_unlock_irqrestore(&pool->lock, flags);
+      raw_spin_unlock_irqrestore(&pool->lock, flags);
       if (pool->expandsize >= blocksize + sizeof(sq_entry_t))
         {
           size_t nexpand = (pool->expandsize - sizeof(sq_entry_t)) /
@@ -376,7 +376,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
             }
 
           kasan_poison(base, size);
-          flags = spin_lock_irqsave(&pool->lock);
+          flags = raw_spin_lock_irqsave(&pool->lock);
           mempool_add_queue(pool, &pool->queue, base, nexpand, blocksize);
           sq_addlast((FAR sq_entry_t *)(base + nexpand * blocksize),
@@ -396,7 +396,7 @@ FAR void *mempool_allocate(FAR struct mempool_s *pool)
     }
 
   pool->nalloc++;
-  spin_unlock_irqrestore(&pool->lock, flags);
+  raw_spin_unlock_irqrestore(&pool->lock, flags);
 
 #if CONFIG_MM_BACKTRACE >= 0
   mempool_add_backtrace(pool, (FAR struct mempool_backtrace_s *)
@@ -424,7 +424,7 @@ void mempool_release(FAR struct mempool_s *pool, FAR void *blk)
 {
-  irqstate_t flags = spin_lock_irqsave(&pool->lock);
+  irqstate_t flags = raw_spin_lock_irqsave(&pool->lock);
   size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
 #if CONFIG_MM_BACKTRACE >= 0
   FAR struct mempool_backtrace_s *buf =
@@ -461,7 +461,7 @@ void mempool_release(FAR struct mempool_s *pool, FAR void *blk)
     }
 
   kasan_poison(blk, pool->blocksize);
-  spin_unlock_irqrestore(&pool->lock, flags);
+  raw_spin_unlock_irqrestore(&pool->lock, flags);
   if (pool->wait && pool->expandsize == 0)
     {
       int semcount;
@@ -495,13 +495,13 @@ int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info)
 
   DEBUGASSERT(pool != NULL && info != NULL);
 
-  flags = spin_lock_irqsave(&pool->lock);
+  flags = raw_spin_lock_irqsave(&pool->lock);
   info->ordblks = sq_count(&pool->queue);
   info->iordblks = sq_count(&pool->iqueue);
   info->aordblks = pool->nalloc;
   info->arena = sq_count(&pool->equeue) * sizeof(sq_entry_t) +
                 (info->aordblks + info->ordblks + info->iordblks) * blocksize;
-  spin_unlock_irqrestore(&pool->lock, flags);
+  raw_spin_unlock_irqrestore(&pool->lock, flags);
   info->sizeblks = blocksize;
   if (pool->wait && pool->expandsize == 0)
     {
@@ -534,11 +534,11 @@ mempool_info_task(FAR struct mempool_s *pool,
   if (task->pid == PID_MM_FREE)
     {
-      irqstate_t flags = spin_lock_irqsave(&pool->lock);
+      irqstate_t flags = raw_spin_lock_irqsave(&pool->lock);
       size_t count = sq_count(&pool->queue) +
                      sq_count(&pool->iqueue);
 
-      spin_unlock_irqrestore(&pool->lock, flags);
+      raw_spin_unlock_irqrestore(&pool->lock, flags);
       info.aordblks += count;
       info.uordblks += count * blocksize;
     }
diff --git a/mm/mm_gran/mm_grancritical.c b/mm/mm_gran/mm_grancritical.c
index 06481e1de06f4..4d853e85138fc 100644
--- a/mm/mm_gran/mm_grancritical.c
+++ b/mm/mm_gran/mm_grancritical.c
@@ -59,7 +59,7 @@ int gran_enter_critical(FAR struct gran_s *priv)
 {
 #ifdef CONFIG_GRAN_INTR
-  priv->irqstate = spin_lock_irqsave(&priv->lock);
+  priv->irqstate = raw_spin_lock_irqsave(&priv->lock);
   return OK;
 #else
   return nxmutex_lock(&priv->lock);
@@ -69,7 +69,7 @@ int gran_enter_critical(FAR struct gran_s *priv)
 void gran_leave_critical(FAR struct gran_s *priv)
 {
 #ifdef CONFIG_GRAN_INTR
-  spin_unlock_irqrestore(&priv->lock, priv->irqstate);
+  raw_spin_unlock_irqrestore(&priv->lock, priv->irqstate);
 #else
   nxmutex_unlock(&priv->lock);
 #endif
diff --git a/sched/clock/clock_adjtime.c b/sched/clock/clock_adjtime.c
index c497a773f7237..97a2992fa296c 100644
--- a/sched/clock/clock_adjtime.c
+++ b/sched/clock/clock_adjtime.c
@@ -63,7 +63,7 @@ static void adjtime_wdog_callback(wdparm_t arg)
 
   UNUSED(arg);
 
-  flags = spin_lock_irqsave(&g_adjtime_lock);
+  flags = raw_spin_lock_irqsave(&g_adjtime_lock);
 
 #ifdef CONFIG_ARCH_HAVE_ADJTIME
   up_adjtime(0);
@@ -74,7 +74,7 @@ static void adjtime_wdog_callback(wdparm_t arg)
 #endif
 
   g_adjtime_ppb = 0;
-  spin_unlock_irqrestore(&g_adjtime_lock, flags);
+  raw_spin_unlock_irqrestore(&g_adjtime_lock, flags);
 }
 
 /* Query remaining adjustment in microseconds */
diff --git a/sched/clock/clock_gettime.c b/sched/clock/clock_gettime.c
index ebebef61a495d..acdb16147acc2 100644
--- a/sched/clock/clock_gettime.c
+++ b/sched/clock/clock_gettime.c
@@ -58,7 +58,7 @@ static clock_t clock_process_runtime(FAR struct tcb_s *tcb)
 
   group = tcb->group;
 
-  flags = spin_lock_irqsave(&group->tg_lock);
+  flags = raw_spin_lock_irqsave(&group->tg_lock);
   sq_for_every(&group->tg_members, curr)
     {
       tcb = container_of(curr, struct tcb_s, member);
@@ -66,7 +66,7 @@ static clock_t clock_process_runtime(FAR struct tcb_s *tcb)
       runtime += tcb->run_time;
     }
 
-  spin_unlock_irqrestore(&group->tg_lock, flags);
+  raw_spin_unlock_irqrestore(&group->tg_lock, flags);
   return runtime;
 #  else /* HAVE_GROUP_MEMBERS */
   return tcb->run_time;
@@ -109,9 +109,9 @@ void nxclock_gettime(clockid_t clock_id, FAR struct timespec *tp)
        * was last set, this gives us the current time.
        */
 
-      flags = spin_lock_irqsave(&g_basetime_lock);
+      flags = raw_spin_lock_irqsave(&g_basetime_lock);
       clock_timespec_add(&g_basetime, &ts, tp);
-      spin_unlock_irqrestore(&g_basetime_lock, flags);
+      raw_spin_unlock_irqrestore(&g_basetime_lock, flags);
 #else
       clock_timekeeping_get_wall_time(tp);
 #endif
diff --git a/sched/clock/clock_initialize.c b/sched/clock/clock_initialize.c
index ba02b0893a646..e631606619137 100644
--- a/sched/clock/clock_initialize.c
+++ b/sched/clock/clock_initialize.c
@@ -165,7 +165,7 @@ static void clock_inittime(FAR const struct timespec *tp)
 
   clock_systime_timespec(&ts);
 
-  flags = spin_lock_irqsave(&g_basetime_lock);
+  flags = raw_spin_lock_irqsave(&g_basetime_lock);
   if (tp)
     {
       memcpy(&g_basetime, tp, sizeof(struct timespec));
@@ -185,7 +185,7 @@ static void clock_inittime(FAR const struct timespec *tp)
       g_basetime.tv_sec--;
     }
 
-  spin_unlock_irqrestore(&g_basetime_lock, flags);
+  raw_spin_unlock_irqrestore(&g_basetime_lock, flags);
 #else
   clock_inittimekeeping(tp);
 #endif
@@ -347,9 +347,9 @@ void clock_resynchronize(FAR struct timespec *rtc_diff)
        * was last set, this gives us the current time.
        */
 
-      flags = spin_lock_irqsave(&g_basetime_lock);
+      flags = raw_spin_lock_irqsave(&g_basetime_lock);
       clock_timespec_add(&bias, &g_basetime, &curr_ts);
-      spin_unlock_irqrestore(&g_basetime_lock, flags);
+      raw_spin_unlock_irqrestore(&g_basetime_lock, flags);
 
       /* Check if RTC has advanced past system time. */
 
diff --git a/sched/clock/clock_perf.c b/sched/clock/clock_perf.c
index 1fd9c46eb33cc..82e98bdaaec49 100644
--- a/sched/clock/clock_perf.c
+++ b/sched/clock/clock_perf.c
@@ -95,7 +95,7 @@ clock_t perf_gettime(void)
 {
   FAR struct perf_s *perf = &g_perf;
   clock_t now = up_perf_gettime();
-  irqstate_t flags = spin_lock_irqsave(&perf->lock);
+  irqstate_t flags = raw_spin_lock_irqsave(&perf->lock);
   clock_t result;
 
   /* Check if overflow */
@@ -107,7 +107,7 @@ clock_t perf_gettime(void)
 
   perf->last = now;
   result = (clock_t)now | (clock_t)perf->overflow << 32;
-  spin_unlock_irqrestore(&perf->lock, flags);
+  raw_spin_unlock_irqrestore(&perf->lock, flags);
 
   return result;
 }
diff --git a/sched/clock/clock_settime.c b/sched/clock/clock_settime.c
index 41017e69647e2..788c5f77c9e8d 100644
--- a/sched/clock/clock_settime.c
+++ b/sched/clock/clock_settime.c
@@ -78,11 +78,11 @@ void nxclock_settime(clockid_t clock_id, FAR const struct timespec *tp)
 
   clock_systime_timespec(&bias);
 
-  flags = spin_lock_irqsave(&g_basetime_lock);
+  flags = raw_spin_lock_irqsave(&g_basetime_lock);
   clock_timespec_subtract(tp, &bias, &g_basetime);
-  spin_unlock_irqrestore(&g_basetime_lock, flags);
+  raw_spin_unlock_irqrestore(&g_basetime_lock, flags);
 
   /* Setup the RTC (lo- or high-res) */
 
diff --git a/sched/clock/clock_systime_timespec.c b/sched/clock/clock_systime_timespec.c
index fbb7ed6f14e92..153472d23b9dd 100644
--- a/sched/clock/clock_systime_timespec.c
+++ b/sched/clock/clock_systime_timespec.c
@@ -68,9 +68,9 @@ int clock_systime_timespec(FAR struct timespec *ts)
 
       up_rtc_gettime(ts);
 
-      flags = spin_lock_irqsave(&g_basetime_lock);
+      flags = raw_spin_lock_irqsave(&g_basetime_lock);
       clock_timespec_subtract(ts, &g_basetime, ts);
-      spin_unlock_irqrestore(&g_basetime_lock, flags);
+      raw_spin_unlock_irqrestore(&g_basetime_lock, flags);
     }
   else
     {
diff --git a/sched/clock/clock_timekeeping.c b/sched/clock/clock_timekeeping.c
index 8944857e641d9..5c3a38052ed60 100644
--- a/sched/clock/clock_timekeeping.c
+++ b/sched/clock/clock_timekeeping.c
@@ -73,7 +73,7 @@ static int clock_get_current_time(FAR struct timespec *ts,
   time_t sec;
   int ret;
 
-  flags = spin_lock_irqsave(&g_clock_lock);
+  flags = raw_spin_lock_irqsave(&g_clock_lock);
 
   ret = up_timer_gettick(&counter);
   if (ret < 0)
@@ -97,7 +97,7 @@ static int clock_get_current_time(FAR struct timespec *ts,
   ts->tv_sec = base->tv_sec + sec;
 
 errout_in_critical_section:
-  spin_unlock_irqrestore(&g_clock_lock, flags);
+  raw_spin_unlock_irqrestore(&g_clock_lock, flags);
   return ret;
 }
 
@@ -124,7 +124,7 @@ int clock_timekeeping_set_wall_time(FAR const struct timespec *ts)
   uint64_t counter;
   int ret;
 
-  flags = spin_lock_irqsave(&g_clock_lock);
+  flags = raw_spin_lock_irqsave(&g_clock_lock);
 
   ret = up_timer_gettick(&counter);
   if (ret < 0)
@@ -138,7 +138,7 @@ int clock_timekeeping_set_wall_time(FAR const struct timespec *ts)
   g_clock_last_counter = counter;
 
 errout_in_critical_section:
-  spin_unlock_irqrestore(&g_clock_lock, flags);
+  raw_spin_unlock_irqrestore(&g_clock_lock, flags);
   return ret;
 }
 
@@ -189,7 +189,7 @@ int adjtime(FAR const struct timeval *delta, FAR struct timeval *olddelta)
       return -1;
     }
 
-  flags = spin_lock_irqsave(&g_clock_lock);
+  flags = raw_spin_lock_irqsave(&g_clock_lock);
 
   adjust_usec = delta->tv_sec * USEC_PER_SEC + delta->tv_usec;
 
@@ -200,7 +200,7 @@ int adjtime(FAR const struct timeval *delta, FAR struct timeval *olddelta)
 
   g_clock_adjust = adjust_usec;
 
-  spin_unlock_irqrestore(&g_clock_lock, flags);
+  raw_spin_unlock_irqrestore(&g_clock_lock, flags);
 
   return OK;
 }
@@ -218,7 +218,7 @@ void clock_update_wall_time(void)
   time_t sec;
   int ret;
 
-  flags = spin_lock_irqsave(&g_clock_lock);
+  flags = raw_spin_lock_irqsave(&g_clock_lock);
 
   ret = up_timer_gettick(&counter);
   if (ret < 0)
@@ -272,7 +272,7 @@ void clock_update_wall_time(void)
   g_clock_last_counter = counter;
 
 errout_in_critical_section:
-  spin_unlock_irqrestore(&g_clock_lock, flags);
+  raw_spin_unlock_irqrestore(&g_clock_lock, flags);
 }
 
 /****************************************************************************
@@ -283,7 +283,7 @@ void clock_inittimekeeping(FAR const struct timespec *tp)
 {
   irqstate_t flags;
 
-  flags = spin_lock_irqsave(&g_clock_lock);
+  flags = raw_spin_lock_irqsave(&g_clock_lock);
 
   up_timer_getmask(&g_clock_mask);
   if (tp)
@@ -296,7 +296,7 @@ void clock_inittimekeeping(FAR const struct timespec *tp)
     }
 
   up_timer_gettick(&g_clock_last_counter);
-  spin_unlock_irqrestore(&g_clock_lock, flags);
+  raw_spin_unlock_irqrestore(&g_clock_lock, flags);
 }
 
 #endif /* CONFIG_CLOCK_TIMEKEEPING */
diff --git a/sched/group/group_join.c b/sched/group/group_join.c
index 8209ba93577d4..80058a54b2c51 100644
--- a/sched/group/group_join.c
+++ b/sched/group/group_join.c
@@ -102,9 +102,9 @@ void group_join(FAR struct pthread_tcb_s *tcb)
 
   /* Add the member to the group */
 
-  flags = spin_lock_irqsave(&group->tg_lock);
+  flags = raw_spin_lock_irqsave(&group->tg_lock);
   sq_addfirst(&tcb->cmn.member, &group->tg_members);
-  spin_unlock_irqrestore(&group->tg_lock, flags);
+  raw_spin_unlock_irqrestore(&group->tg_lock, flags);
 }
 
 #endif /* !CONFIG_DISABLE_PTHREAD */
diff --git a/sched/group/group_leave.c b/sched/group/group_leave.c
index a8e6dc0d05747..fcb67f9558d0d 100644
--- a/sched/group/group_leave.c
+++ b/sched/group/group_leave.c
@@ -189,9 +189,9 @@ void group_leave(FAR struct tcb_s *tcb)
   /* Remove the member from group. */
 
 #ifdef HAVE_GROUP_MEMBERS
-  flags = spin_lock_irqsave(&group->tg_lock);
+  flags = raw_spin_lock_irqsave(&group->tg_lock);
   sq_rem(&tcb->member, &group->tg_members);
-  spin_unlock_irqrestore(&group->tg_lock, flags);
+  raw_spin_unlock_irqrestore(&group->tg_lock, flags);
 
   /* Have all of the members left the group? */
 
diff --git a/sched/irq/irq_attach.c b/sched/irq/irq_attach.c
index cccd353ca792a..7492857c7836c 100644
--- a/sched/irq/irq_attach.c
+++ b/sched/irq/irq_attach.c
@@ -68,13 +68,13 @@ int irq_to_ndx(int irq)
 {
   DEBUGASSERT(g_irqmap_count < CONFIG_ARCH_NUSER_INTERRUPTS);
 
-  irqstate_t flags = spin_lock_irqsave(&g_irqlock);
+  irqstate_t flags = raw_spin_lock_irqsave(&g_irqlock);
   if (g_irqmap[irq] == 0)
     {
       g_irqmap[irq] = g_irqmap_count++;
     }
 
-  spin_unlock_irqrestore(&g_irqlock, flags);
+  raw_spin_unlock_irqrestore(&g_irqlock, flags);
   return g_irqmap[irq];
 }
 #endif
@@ -108,7 +108,7 @@ int irq_attach(int irq, xcpt_t isr, FAR void *arg)
    * to the unexpected interrupt handler.
    */
 
-  flags = spin_lock_irqsave(&g_irqlock);
+  flags = raw_spin_lock_irqsave(&g_irqlock);
   if (isr == NULL)
     {
       /* Disable the interrupt if we can before detaching it.  We might
@@ -142,7 +142,7 @@ int irq_attach(int irq, xcpt_t isr, FAR void *arg)
   if (is_irqchain(ndx, isr))
     {
       ret = irqchain_attach(ndx, isr, arg);
-      spin_unlock_irqrestore(&g_irqlock, flags);
+      raw_spin_unlock_irqrestore(&g_irqlock, flags);
       return ret;
     }
 #endif
@@ -157,7 +157,7 @@ int irq_attach(int irq, xcpt_t isr, FAR void *arg)
       g_irqvector[ndx].count = 0;
 #endif
 
-      spin_unlock_irqrestore(&g_irqlock, flags);
+      raw_spin_unlock_irqrestore(&g_irqlock, flags);
       ret = OK;
     }
 
diff --git a/sched/irq/irq_chain.c b/sched/irq/irq_chain.c
index cb292ac45d21a..65173d1d1a6e4 100644
--- a/sched/irq/irq_chain.c
+++ b/sched/irq/irq_chain.c
@@ -149,14 +149,14 @@ int irqchain_attach(int ndx, xcpt_t isr, FAR void *arg)
   FAR struct irqchain_s *curr;
   irqstate_t flags;
 
-  flags = spin_lock_irqsave(&g_irqchainlock);
+  flags = raw_spin_lock_irqsave(&g_irqchainlock);
   if (isr != irq_unexpected_isr)
     {
       if (g_irqvector[ndx].handler != irqchain_dispatch)
         {
           if (sq_count(&g_irqchainfreelist) < 2)
             {
-              spin_unlock_irqrestore(&g_irqchainlock, flags);
+              raw_spin_unlock_irqrestore(&g_irqchainlock, flags);
               return -ENOMEM;
             }
 
@@ -174,7 +174,7 @@ int irqchain_attach(int ndx, xcpt_t isr, FAR void *arg)
       node = (FAR struct irqchain_s *)sq_remfirst(&g_irqchainfreelist);
       if (node == NULL)
         {
-          spin_unlock_irqrestore(&g_irqchainlock, flags);
+          raw_spin_unlock_irqrestore(&g_irqchainlock, flags);
           return -ENOMEM;
         }
 
@@ -195,7 +195,7 @@ int irqchain_attach(int ndx, xcpt_t isr, FAR void *arg)
       irqchain_detach_all(ndx);
     }
 
-  spin_unlock_irqrestore(&g_irqchainlock, flags);
+  raw_spin_unlock_irqrestore(&g_irqchainlock, flags);
   return OK;
 }
 
@@ -217,7 +217,7 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg)
           return ndx;
         }
 
-      flags = spin_lock_irqsave(&g_irqchainlock);
+      flags = raw_spin_lock_irqsave(&g_irqchainlock);
 
       if (g_irqvector[ndx].handler == irqchain_dispatch)
         {
@@ -263,7 +263,7 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg)
           ret = irq_detach(irq);
         }
 
-      spin_unlock_irqrestore(&g_irqchainlock, flags);
+      raw_spin_unlock_irqrestore(&g_irqchainlock, flags);
     }
 
   return ret;
diff --git a/sched/mqueue/mq_msgfree.c b/sched/mqueue/mq_msgfree.c
index 23134a497b595..9d207af3533b3 100644
--- a/sched/mqueue/mq_msgfree.c
+++ b/sched/mqueue/mq_msgfree.c
@@ -69,9 +69,9 @@ void nxmq_free_msg(FAR struct mqueue_msg_s *mqmsg)
        * list from interrupt handlers.
        */
 
-      flags = spin_lock_irqsave(&g_msgfreelock);
+      flags = raw_spin_lock_irqsave(&g_msgfreelock);
       list_add_tail(&g_msgfree, &mqmsg->node);
-      spin_unlock_irqrestore(&g_msgfreelock, flags);
+      raw_spin_unlock_irqrestore(&g_msgfreelock, flags);
     }
 
   /* If this is a message pre-allocated for interrupts,
@@ -84,9 +84,9 @@ void nxmq_free_msg(FAR struct mqueue_msg_s *mqmsg)
        * list from interrupt handlers.
        */
 
-      flags = spin_lock_irqsave(&g_msgfreelock);
+      flags = raw_spin_lock_irqsave(&g_msgfreelock);
       list_add_tail(&g_msgfreeirq, &mqmsg->node);
-      spin_unlock_irqrestore(&g_msgfreelock, flags);
+      raw_spin_unlock_irqrestore(&g_msgfreelock, flags);
     }
 
   /* Otherwise, deallocate it.  Note:  interrupt handlers
diff --git a/sched/mqueue/mq_send.c b/sched/mqueue/mq_send.c
index 241bd3cd25ff0..41f3b785461a1 100644
--- a/sched/mqueue/mq_send.c
+++ b/sched/mqueue/mq_send.c
@@ -139,9 +139,9 @@ static FAR struct mqueue_msg_s *nxmq_alloc_msg(uint16_t msgsize)
 
   /* Try to get the message from the generally available free list. */
 
-  flags = spin_lock_irqsave(&g_msgfreelock);
+  flags = raw_spin_lock_irqsave(&g_msgfreelock);
   mqmsg = (FAR struct mqueue_msg_s *)list_remove_head(&g_msgfree);
-  spin_unlock_irqrestore(&g_msgfreelock, flags);
+  raw_spin_unlock_irqrestore(&g_msgfreelock, flags);
   if (mqmsg == NULL)
     {
       /* If we were called from an interrupt handler, then try to get the
@@ -153,9 +153,9 @@ static FAR struct mqueue_msg_s *nxmq_alloc_msg(uint16_t msgsize)
         {
           /* Try the free list reserved for interrupt handlers */
 
-          flags = spin_lock_irqsave(&g_msgfreelock);
+          flags = raw_spin_lock_irqsave(&g_msgfreelock);
           mqmsg = (FAR struct mqueue_msg_s *)list_remove_head(&g_msgfreeirq);
-          spin_unlock_irqrestore(&g_msgfreelock, flags);
+          raw_spin_unlock_irqrestore(&g_msgfreelock, flags);
         }
 
       /* We were not called from an interrupt handler. */
diff --git a/sched/pthread/pthread_mutex.c b/sched/pthread/pthread_mutex.c
index 24a063908f264..76614c5fcf141 100644
--- a/sched/pthread/pthread_mutex.c
+++ b/sched/pthread/pthread_mutex.c
@@ -65,10 +65,10 @@ static void pthread_mutex_add(FAR struct pthread_mutex_s *mutex)
 
   /* Add the mutex to the list of mutexes held by this pthread */
 
-  flags = spin_lock_irqsave(&rtcb->mutex_lock);
+  flags = raw_spin_lock_irqsave(&rtcb->mutex_lock);
   mutex->flink = rtcb->mhead;
   rtcb->mhead = mutex;
-  spin_unlock_irqrestore(&rtcb->mutex_lock, flags);
+  raw_spin_unlock_irqrestore(&rtcb->mutex_lock, flags);
 }
 
 /****************************************************************************
@@ -92,7 +92,7 @@ static void pthread_mutex_remove(FAR struct pthread_mutex_s *mutex)
   FAR struct pthread_mutex_s *prev;
   irqstate_t flags;
 
-  flags = spin_lock_irqsave(&rtcb->mutex_lock);
+  flags = raw_spin_lock_irqsave(&rtcb->mutex_lock);
 
   /* Remove the mutex from the list of mutexes held by this task */
 
@@ -118,7 +118,7 @@ static void pthread_mutex_remove(FAR struct pthread_mutex_s *mutex)
     }
 
   mutex->flink = NULL;
-  spin_unlock_irqrestore(&rtcb->mutex_lock, flags);
+  raw_spin_unlock_irqrestore(&rtcb->mutex_lock, flags);
 }
 
 /****************************************************************************
@@ -372,7 +372,7 @@ void pthread_mutex_inconsistent(FAR struct tcb_s *tcb)
 
   DEBUGASSERT(tcb != NULL);
 
-  flags = spin_lock_irqsave(&tcb->mutex_lock);
+  flags = raw_spin_lock_irqsave(&tcb->mutex_lock);
 
   /* Remove and process each mutex held by this task */
 
@@ -390,5 +390,5 @@ void pthread_mutex_inconsistent(FAR struct tcb_s *tcb)
       mutex_unlock(&mutex->mutex);
     }
 
-  spin_unlock_irqrestore(&tcb->mutex_lock, flags);
+  raw_spin_unlock_irqrestore(&tcb->mutex_lock, flags);
 }
diff --git a/sched/sched/sched_profil.c b/sched/sched/sched_profil.c
index 40c276a0e79a3..72426be6802dc 100644
--- a/sched/sched/sched_profil.c
+++ b/sched/sched/sched_profil.c
@@ -75,7 +75,7 @@ static int profil_timer_handler_cpu(FAR void *arg)
   uintptr_t pc = up_getusrpc(NULL);
   irqstate_t flags;
 
-  flags = spin_lock_irqsave(&prof->lock);
+  flags = raw_spin_lock_irqsave(&prof->lock);
   if (pc >= prof->lowpc && pc < prof->highpc)
     {
       size_t idx = (pc - prof->lowpc) / 2;
@@ -87,7 +87,7 @@ static int profil_timer_handler_cpu(FAR void *arg)
       prof->counter[idx]++;
     }
 
-  spin_unlock_irqrestore(&prof->lock, flags);
+  raw_spin_unlock_irqrestore(&prof->lock, flags);
   return OK;
 }
 
@@ -156,12 +156,12 @@ int profil(FAR unsigned short *buf, size_t bufsiz,
   memset(buf, 0, bufsiz);
   highpc = (uintmax_t)bufsiz * 65536 / scale;
 
-  flags = spin_lock_irqsave(&prof->lock);
+  flags = raw_spin_lock_irqsave(&prof->lock);
   prof->counter = buf;
   prof->lowpc = offset;
   prof->highpc = offset + highpc;
   prof->scale = scale;
-  spin_unlock_irqrestore(&prof->lock, flags);
+  raw_spin_unlock_irqrestore(&prof->lock, flags);
 
   wd_start(&prof->timer, PROFTICK, profil_timer_handler,
            (wdparm_t)(uintptr_t)prof);
diff --git a/sched/sched/sched_smp.c b/sched/sched/sched_smp.c
index 46f70df7e7740..b81a06c45ab3a 100644
--- a/sched/sched/sched_smp.c
+++ b/sched/sched/sched_smp.c
@@ -77,13 +77,13 @@ static void nxsched_smp_call_add(int cpu,
 {
   irqstate_t flags;
 
-  flags = spin_lock_irqsave(&g_smp_call_lock);
+  flags = raw_spin_lock_irqsave(&g_smp_call_lock);
   if (!sq_inqueue(&data->node[cpu], &g_smp_call_queue[cpu]))
     {
       sq_addlast(&data->node[cpu], &g_smp_call_queue[cpu]);
     }
 
-  spin_unlock_irqrestore(&g_smp_call_lock, flags);
+  raw_spin_unlock_irqrestore(&g_smp_call_lock, flags);
 }
 
 /****************************************************************************
@@ -114,7 +114,7 @@ int nxsched_smp_call_handler(int irq, FAR void *context,
   FAR sq_entry_t *next;
   int cpu = this_cpu();
 
-  irqstate_t flags = spin_lock_irqsave(&g_smp_call_lock);
+  irqstate_t flags = raw_spin_lock_irqsave(&g_smp_call_lock);
 
   call_queue = &g_smp_call_queue[cpu];
 
@@ -126,11 +126,11 @@ int nxsched_smp_call_handler(int irq, FAR void *context,
 
       sq_rem(&data->node[cpu], call_queue);
 
-      spin_unlock_irqrestore(&g_smp_call_lock, flags);
+      raw_spin_unlock_irqrestore(&g_smp_call_lock, flags);
 
       ret = data->func(data->arg);
 
-      flags = spin_lock_irqsave(&g_smp_call_lock);
+      flags = raw_spin_lock_irqsave(&g_smp_call_lock);
 
       if (data->cookie != NULL)
         {
@@ -143,7 +143,7 @@ int nxsched_smp_call_handler(int irq, FAR void *context,
         }
     }
 
-  spin_unlock_irqrestore(&g_smp_call_lock, flags);
+  raw_spin_unlock_irqrestore(&g_smp_call_lock, flags);
 
   return OK;
 }
diff --git a/sched/signal/sig_action.c b/sched/signal/sig_action.c
index 731ff9c70ce82..3c4d34bba65bf 100644
--- a/sched/signal/sig_action.c
+++ b/sched/signal/sig_action.c
@@ -89,7 +89,7 @@ static void nxsig_alloc_actionblock(void)
   /* Use pre-allocated instances only once */
 
 #if CONFIG_SIG_PREALLOC_ACTIONS > 0
-  flags = spin_lock_irqsave(&g_sigaction_spin);
+  flags = raw_spin_lock_irqsave(&g_sigaction_spin);
   if (!g_sigactions_used)
     {
       for (i = 0; i < CONFIG_SIG_PREALLOC_ACTIONS; i++)
@@ -100,7 +100,7 @@ static void nxsig_alloc_actionblock(void)
       g_sigactions_used = true;
     }
 
-  spin_unlock_irqrestore(&g_sigaction_spin, flags);
+  raw_spin_unlock_irqrestore(&g_sigaction_spin, flags);
 #endif
 
   /* Allocate a block of signal actions */
@@ -108,14 +108,14 @@ static void nxsig_alloc_actionblock(void)
   sigact = kmm_malloc((sizeof(sigactq_t)) * CONFIG_SIG_ALLOC_ACTIONS);
   if (sigact != NULL)
     {
-      flags = spin_lock_irqsave(&g_sigaction_spin);
+      flags = raw_spin_lock_irqsave(&g_sigaction_spin);
 
       for (i = 0; i < CONFIG_SIG_ALLOC_ACTIONS; i++)
         {
           sq_addlast((FAR sq_entry_t *)sigact++, &g_sigfreeaction);
         }
 
-      spin_unlock_irqrestore(&g_sigaction_spin, flags);
+      raw_spin_unlock_irqrestore(&g_sigaction_spin, flags);
     }
 }
 
@@ -134,9 +134,9 @@ static FAR sigactq_t *nxsig_alloc_action(void)
 
   /* Try to get the signal action structure from the free list */
 
-  flags = spin_lock_irqsave(&g_sigaction_spin);
+  flags = raw_spin_lock_irqsave(&g_sigaction_spin);
   sigact = (FAR sigactq_t *)sq_remfirst(&g_sigfreeaction);
-  spin_unlock_irqrestore(&g_sigaction_spin, flags);
+  raw_spin_unlock_irqrestore(&g_sigaction_spin, flags);
 
   /* Check if we got one via loop as not in critical section now */
 
@@ -148,9 +148,9 @@ static FAR sigactq_t *nxsig_alloc_action(void)
 
       /* And try again */
 
-      flags = spin_lock_irqsave(&g_sigaction_spin);
+      flags = raw_spin_lock_irqsave(&g_sigaction_spin);
       sigact = (FAR sigactq_t *)sq_remfirst(&g_sigfreeaction);
-      spin_unlock_irqrestore(&g_sigaction_spin, flags);
+      raw_spin_unlock_irqrestore(&g_sigaction_spin, flags);
     }
 
   return sigact;
@@ -459,9 +459,9 @@ void nxsig_release_action(FAR sigactq_t *sigact)
     {
       /* Non-preallocated instances will never return to heap! */
 
-      flags = spin_lock_irqsave(&g_sigaction_spin);
+      flags = raw_spin_lock_irqsave(&g_sigaction_spin);
       sq_addlast((FAR sq_entry_t *)sigact, &g_sigfreeaction);
-      spin_unlock_irqrestore(&g_sigaction_spin, flags);
+      raw_spin_unlock_irqrestore(&g_sigaction_spin, flags);
     }
   else
     {
diff --git a/sched/signal/sig_default.c b/sched/signal/sig_default.c
index a2c01438570e7..f7e12b2243590 100644
--- a/sched/signal/sig_default.c
+++ b/sched/signal/sig_default.c
@@ -534,9 +534,9 @@ _sa_handler_t nxsig_default(FAR struct tcb_s *tcb, int signo, bool defaction)
         {
           /* nxsig_addset() is not atomic (but neither is sigaction()) */
 
-          flags = spin_lock_irqsave(&group->tg_lock);
+          flags = raw_spin_lock_irqsave(&group->tg_lock);
           nxsig_addset(&group->tg_sigdefault, signo);
-          spin_unlock_irqrestore(&group->tg_lock, flags);
+          raw_spin_unlock_irqrestore(&group->tg_lock, flags);
         }
     }
 
@@ -546,9 +546,9 @@ _sa_handler_t nxsig_default(FAR struct tcb_s *tcb, int signo, bool defaction)
        * atomic (but neither is sigaction()).
        */
 
-      flags = spin_lock_irqsave(&group->tg_lock);
+      flags = raw_spin_lock_irqsave(&group->tg_lock);
       nxsig_delset(&group->tg_sigdefault, signo);
-      spin_unlock_irqrestore(&group->tg_lock, flags);
+      raw_spin_unlock_irqrestore(&group->tg_lock, flags);
     }
 
   return handler;
diff --git a/sched/signal/sig_findaction.c b/sched/signal/sig_findaction.c
index ddfc777d0df16..553b25aa54bfc 100644
--- a/sched/signal/sig_findaction.c
+++ b/sched/signal/sig_findaction.c
@@ -57,7 +57,7 @@ FAR sigactq_t *nxsig_find_action(FAR struct task_group_s *group, int signo)
        * protection.
        */
 
-      flags = spin_lock_irqsave(&group->tg_lock);
+      flags = raw_spin_lock_irqsave(&group->tg_lock);
 
       /* Search the list for a sigaction on this signal */
 
@@ -65,7 +65,7 @@ FAR sigactq_t *nxsig_find_action(FAR struct task_group_s *group, int signo)
            ((sigact) && (sigact->signo != signo));
            sigact = sigact->flink);
 
-      spin_unlock_irqrestore(&group->tg_lock, flags);
+      raw_spin_unlock_irqrestore(&group->tg_lock, flags);
     }
 
   return sigact;
diff --git a/sched/timer/timer_create.c b/sched/timer/timer_create.c
index 4f42d8902c24b..0d2ac7d7b9795 100644
--- a/sched/timer/timer_create.c
+++ b/sched/timer/timer_create.c
@@ -63,10 +63,10 @@ static FAR struct posix_timer_s *timer_allocate(void)
   /* Try to get a preallocated timer from the free list */
 
 #if CONFIG_PREALLOC_TIMERS > 0
-  flags = spin_lock_irqsave(&g_locktimers);
+  flags = raw_spin_lock_irqsave(&g_locktimers);
   ret = (FAR struct posix_timer_s *)
     sq_remfirst((FAR sq_queue_t *)&g_freetimers);
-  spin_unlock_irqrestore(&g_locktimers, flags);
+  raw_spin_unlock_irqrestore(&g_locktimers, flags);
 
   /* Did we get one? */
 
@@ -95,9 +95,9 @@ static FAR struct posix_timer_s *timer_allocate(void)
 
       /* And add it to the end of the list of allocated timers */
 
-      flags = spin_lock_irqsave(&g_locktimers);
+      flags = raw_spin_lock_irqsave(&g_locktimers);
       sq_addlast((FAR sq_entry_t *)ret, (FAR sq_queue_t *)&g_alloctimers);
-      spin_unlock_irqrestore(&g_locktimers, flags);
+      raw_spin_unlock_irqrestore(&g_locktimers, flags);
     }
 
   return ret;
diff --git a/sched/timer/timer_initialize.c b/sched/timer/timer_initialize.c
index 2a29328b8be62..a3a3c3645525b 100644
--- a/sched/timer/timer_initialize.c
+++ b/sched/timer/timer_initialize.c
@@ -139,7 +139,7 @@ void timer_deleteall(pid_t pid)
 
   sq_init(&freetimers);
 
-  flags = spin_lock_irqsave(&g_locktimers);
+  flags = raw_spin_lock_irqsave(&g_locktimers);
   for (timer = (FAR struct posix_timer_s *)g_alloctimers.head;
        timer != NULL;
        timer = next)
@@ -152,7 +152,7 @@ void timer_deleteall(pid_t pid)
         }
     }
 
-  spin_unlock_irqrestore(&g_locktimers, flags);
+  raw_spin_unlock_irqrestore(&g_locktimers, flags);
 
   for (timer = (FAR struct posix_timer_s *)freetimers.head;
        timer != NULL;
@@ -188,7 +188,7 @@ FAR struct posix_timer_s *timer_gethandle(timer_t timerid)
 
   if (timerid != NULL)
     {
-      flags = spin_lock_irqsave(&g_locktimers);
+      flags = raw_spin_lock_irqsave(&g_locktimers);
 
       sq_for_every(&g_alloctimers, entry)
         {
@@ -199,7 +199,7 @@ FAR struct posix_timer_s *timer_gethandle(timer_t timerid)
             }
         }
 
-      spin_unlock_irqrestore(&g_locktimers, flags);
+      raw_spin_unlock_irqrestore(&g_locktimers, flags);
     }
 
   return timer;
diff --git a/sched/timer/timer_release.c b/sched/timer/timer_release.c
index 9a132c403ceea..cb7ea7a4b3ddb 100644
--- a/sched/timer/timer_release.c
+++ b/sched/timer/timer_release.c
@@ -57,7 +57,7 @@ static inline void timer_free(struct posix_timer_s *timer)
 
   /* Remove the timer from the allocated list */
 
-  flags = spin_lock_irqsave(&g_locktimers);
+  flags = raw_spin_lock_irqsave(&g_locktimers);
   sq_rem((FAR sq_entry_t *)timer, (FAR sq_queue_t *)&g_alloctimers);
 
   /* Return it to the free list if it is one of the preallocated timers */
@@ -66,14 +66,14 @@ static inline void timer_free(struct posix_timer_s *timer)
   if ((timer->pt_flags & PT_FLAGS_PREALLOCATED) != 0)
     {
       sq_addlast((FAR sq_entry_t *)timer, (FAR sq_queue_t *)&g_freetimers);
-      spin_unlock_irqrestore(&g_locktimers, flags);
+      raw_spin_unlock_irqrestore(&g_locktimers, flags);
     }
   else
 #endif
     {
       /* Otherwise, return it to the heap */
 
-      spin_unlock_irqrestore(&g_locktimers, flags);
+      raw_spin_unlock_irqrestore(&g_locktimers, flags);
       kmm_free(timer);
     }
 }
diff --git a/sched/wdog/wd_cancel.c b/sched/wdog/wd_cancel.c
index ae8f80cc7c9e5..922c426957273 100644
--- a/sched/wdog/wd_cancel.c
+++ b/sched/wdog/wd_cancel.c
@@ -63,13 +63,13 @@ int wd_cancel(FAR struct wdog_s *wdog)
   irqstate_t flags;
   bool head;
 
-  flags = spin_lock_irqsave(&g_wdspinlock);
+  flags = raw_spin_lock_irqsave(&g_wdspinlock);
 
   /* Make sure that the watchdog is valid and still active. */
 
   if (wdog == NULL || !WDOG_ISACTIVE(wdog))
     {
-      spin_unlock_irqrestore(&g_wdspinlock, flags);
+      raw_spin_unlock_irqrestore(&g_wdspinlock, flags);
       return -EINVAL;
     }
 
@@ -89,7 +89,7 @@ int wd_cancel(FAR struct wdog_s *wdog)
   /* Mark the watchdog inactive */
 
   wdog->func = NULL;
-  spin_unlock_irqrestore(&g_wdspinlock, flags);
+  raw_spin_unlock_irqrestore(&g_wdspinlock, flags);
 
   if (head)
     {
diff --git a/sched/wdog/wd_start.c b/sched/wdog/wd_start.c
index ca5b7bea3363b..8bb5410391f72 100644
--- a/sched/wdog/wd_start.c
+++ b/sched/wdog/wd_start.c
@@ -116,7 +116,7 @@ static inline_function void wd_expiration(clock_t ticks)
   wdentry_t func;
   wdparm_t arg;
 
-  flags = spin_lock_irqsave(&g_wdspinlock);
+  flags = raw_spin_lock_irqsave(&g_wdspinlock);
 
 #ifdef CONFIG_SCHED_TICKLESS
   /* Increment the nested watchdog timer count to handle cases where wd_start
@@ -154,11 +154,11 @@ static inline_function void wd_expiration(clock_t ticks)
       /* Execute the watchdog function */
 
       up_setpicbase(wdog->picbase);
-      spin_unlock_irqrestore(&g_wdspinlock, flags);
+      raw_spin_unlock_irqrestore(&g_wdspinlock, flags);
 
       CALL_FUNC(func, arg);
 
-      flags = spin_lock_irqsave(&g_wdspinlock);
+      flags = raw_spin_lock_irqsave(&g_wdspinlock);
     }
 
 #ifdef CONFIG_SCHED_TICKLESS
@@ -167,7 +167,7 @@ static inline_function void wd_expiration(clock_t ticks)
   g_wdtimernested--;
 #endif
 
-  spin_unlock_irqrestore(&g_wdspinlock, flags);
+  raw_spin_unlock_irqrestore(&g_wdspinlock, flags);
 }
 
 /****************************************************************************
@@ -299,7 +299,7 @@ int wd_start_abstick(FAR struct wdog_s *wdog, clock_t ticks,
    * the critical section is established.
    */
 
-  flags = spin_lock_irqsave(&g_wdspinlock);
+  flags = raw_spin_lock_irqsave(&g_wdspinlock);
 
 #ifdef CONFIG_SCHED_TICKLESS
   /* We need to reassess timer if the watchdog list head has changed. */
@@ -320,12 +320,12 @@ int wd_start_abstick(FAR struct wdog_s *wdog, clock_t ticks,
        * then this will pick that new delay.
        */
 
-      spin_unlock_irqrestore(&g_wdspinlock, flags);
+      raw_spin_unlock_irqrestore(&g_wdspinlock, flags);
       nxsched_reassess_timer();
     }
   else
     {
-      spin_unlock_irqrestore(&g_wdspinlock, flags);
+      raw_spin_unlock_irqrestore(&g_wdspinlock, flags);
     }
 #else
   UNUSED(reassess);
@@ -339,7 +339,7 @@ int wd_start_abstick(FAR struct wdog_s *wdog, clock_t ticks,
     }
 
   wd_insert(wdog, ticks, wdentry, arg);
-  spin_unlock_irqrestore(&g_wdspinlock, flags);
+  raw_spin_unlock_irqrestore(&g_wdspinlock, flags);
 #endif
 
   sched_note_wdog(NOTE_WDOG_START, wdentry, (FAR void *)(uintptr_t)ticks);
@@ -436,13 +436,13 @@ clock_t wd_timer(clock_t ticks, bool noswitches)
       wd_expiration(ticks);
     }
 
-  flags = spin_lock_irqsave(&g_wdspinlock);
+  flags = raw_spin_lock_irqsave(&g_wdspinlock);
 
   /* Return the delay for the next watchdog to expire */
 
   if (list_is_empty(&g_wdactivelist))
     {
-      spin_unlock_irqrestore(&g_wdspinlock, flags);
+      raw_spin_unlock_irqrestore(&g_wdspinlock, flags);
       return 0;
     }
 
@@ -453,7 +453,7 @@ clock_t wd_timer(clock_t ticks, bool noswitches)
 
   wdog = list_first_entry(&g_wdactivelist, struct wdog_s, node);
   ret = wdog->expired - ticks;
-  spin_unlock_irqrestore(&g_wdspinlock, flags);
+  raw_spin_unlock_irqrestore(&g_wdspinlock, flags);
 
   /* Return the delay for the next watchdog to expire */
 
diff --git a/sched/wqueue/kwork_cancel.c b/sched/wqueue/kwork_cancel.c
index 89bec581bdaea..b8f793557d019 100644
--- a/sched/wqueue/kwork_cancel.c
+++ b/sched/wqueue/kwork_cancel.c
@@ -58,7 +58,7 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync,
    * new work is typically added to the work queue from interrupt handlers.
    */
 
-  flags = spin_lock_irqsave(&wqueue->lock);
+  flags = raw_spin_lock_irqsave(&wqueue->lock);
   if (work->worker != NULL)
     {
       /* Remove the entry from the work queue and make sure that it is
@@ -84,14 +84,14 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync,
               wqueue->worker[wndx].pid != nxsched_gettid())
             {
               wqueue->worker[wndx].wait_count++;
-              spin_unlock_irqrestore(&wqueue->lock, flags);
+              raw_spin_unlock_irqrestore(&wqueue->lock, flags);
               nxsem_wait_uninterruptible(&wqueue->worker[wndx].wait);
               return 1;
             }
         }
     }
 
-  spin_unlock_irqrestore(&wqueue->lock, flags);
+  raw_spin_unlock_irqrestore(&wqueue->lock, flags);
   return ret;
 }
 
diff --git a/sched/wqueue/kwork_notifier.c b/sched/wqueue/kwork_notifier.c
index 6b274e44f2d44..75503ac4659aa 100644
--- a/sched/wqueue/kwork_notifier.c
+++ b/sched/wqueue/kwork_notifier.c
@@ -167,7 +167,7 @@ static void work_notifier_worker(FAR void *arg)
 
   /* Disable interrupts very briefly. */
 
-  flags = spin_lock_irqsave(&g_notifier_lock);
+  flags = raw_spin_lock_irqsave(&g_notifier_lock);
 
   /* Remove the notification from the pending list */
 
@@ -181,7 +181,7 @@ static void work_notifier_worker(FAR void *arg)
       dq_addlast(&notifier->entry, &g_notifier_free);
     }
 
-  spin_unlock_irqrestore(&g_notifier_lock, flags);
+  raw_spin_unlock_irqrestore(&g_notifier_lock, flags);
 }
 
 /****************************************************************************
@@ -218,14 +218,14 @@ int work_notifier_setup(FAR struct work_notifier_s *info)
 
   /* Disable interrupts very briefly. */
 
-  flags = spin_lock_irqsave(&g_notifier_lock);
+  flags = raw_spin_lock_irqsave(&g_notifier_lock);
 
   /* Try to get the entry from the free list */
 
   notifier = (FAR struct work_notifier_entry_s *)
     dq_remfirst(&g_notifier_free);
 
-  spin_unlock_irqrestore(&g_notifier_lock, flags);
+  raw_spin_unlock_irqrestore(&g_notifier_lock, flags);
 
   if (notifier == NULL)
     {
@@ -250,7 +250,7 @@ int work_notifier_setup(FAR struct work_notifier_s *info)
 
       /* Disable interrupts very briefly. */
 
-      flags = spin_lock_irqsave(&g_notifier_lock);
+      flags = raw_spin_lock_irqsave(&g_notifier_lock);
 
       /* Generate a unique key for this notification */
 
@@ -267,7 +267,7 @@ int work_notifier_setup(FAR struct work_notifier_s *info)
       dq_addlast(&notifier->entry, &g_notifier_pending);
       ret = notifier->key;
 
-      spin_unlock_irqrestore(&g_notifier_lock, flags);
+      raw_spin_unlock_irqrestore(&g_notifier_lock, flags);
     }
 
   return ret;
@@ -298,7 +298,7 @@ void work_notifier_teardown(int key)
 
   /* Disable interrupts very briefly. */
 
-  flags = spin_lock_irqsave(&g_notifier_lock);
+  flags = raw_spin_lock_irqsave(&g_notifier_lock);
 
   /* Find the entry matching this key in the g_notifier_pending list.  We
    * assume that there is only one.
@@ -310,20 +310,20 @@ void work_notifier_teardown(int key)
       /* Remove the notification from the pending list */
 
       dq_rem(&notifier->entry, &g_notifier_pending);
-      spin_unlock_irqrestore(&g_notifier_lock, flags);
+      raw_spin_unlock_irqrestore(&g_notifier_lock, flags);
 
      /* Cancel the work, this may be waiting */
 
      work_cancel_sync(notifier->info.qid, &notifier->work);
-      flags = spin_lock_irqsave(&g_notifier_lock);
+      flags = raw_spin_lock_irqsave(&g_notifier_lock);
 
      /* Put the notification to the free list */
 
      dq_addlast(&notifier->entry, &g_notifier_free);
    }
 
-  spin_unlock_irqrestore(&g_notifier_lock, flags);
+  raw_spin_unlock_irqrestore(&g_notifier_lock, flags);
 }
 
 /****************************************************************************
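
Reviewer note (illustration, not part of the patch): every hunk above applies the same mechanical substitution, spin_lock_irqsave()/spin_unlock_irqrestore() to their raw_ counterparts, leaving the lock object and the saved irqstate_t flags handling untouched. A minimal sketch of the resulting calling pattern follows. The lock and counter names are hypothetical, and the characterization of the raw variants (taking the lock without the extra bookkeeping the wrapped versions perform) is an assumption from the naming convention rather than something this diff states; see include/nuttx/spinlock.h for the authoritative semantics.

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

/* Hypothetical lock and shared state, for illustration only */

static spinlock_t g_example_lock = SP_UNLOCKED;
static int g_example_count;

static void example_increment(void)
{
  irqstate_t flags;

  /* Disable local interrupts and take the spinlock, then restore the
   * saved interrupt state when releasing it -- the pattern used at every
   * call site converted in this patch.
   */

  flags = raw_spin_lock_irqsave(&g_example_lock);
  g_example_count++;
  raw_spin_unlock_irqrestore(&g_example_lock, flags);
}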