File 0725-Fix-typos-in-erts-example-include.patch of Package erlang (project home:Ledest:erlang:23)
From 66fd5472e1b3b42082914ebc72bf3497a061fc31 Mon Sep 17 00:00:00 2001
From: "Kian-Meng, Ang" <kianmeng@cpan.org>
Date: Fri, 19 Nov 2021 11:35:37 +0800
Subject: [PATCH] Fix typos in erts/{example, include}

---
 erts/example/time_compat.erl                     |  4 ++--
 erts/include/internal/ethr_atomics.h             |  2 +-
 erts/include/internal/ethread_header_config.h.in |  2 +-
 erts/include/internal/gcc/ethr_dw_atomic.h       |  2 +-
 erts/include/internal/gcc/ethr_membar.h          | 12 ++++++------
 erts/include/internal/i386/ethr_dw_atomic.h      |  6 +++---
 erts/include/internal/win/ethr_dw_atomic.h       |  2 +-
 7 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/erts/example/time_compat.erl b/erts/example/time_compat.erl
index 6472a271b6..495d013333 100644
--- a/erts/example/time_compat.erl
+++ b/erts/example/time_compat.erl
@@ -26,7 +26,7 @@
 %% versions. This way your code can automatically take advantage
 %% of the improvements in the API when available. This is an
 %% example of how to implement such an API, but it can be used
-%% as is if you want to. Just add (a preferrably renamed version of)
+%% as is if you want to. Just add (a preferably renamed version of)
 %% this module to your project, and call the API via this module
 %% instead of calling the BIFs directly.
 %%
@@ -199,7 +199,7 @@ unique_integer(Modifiers) ->
     case is_valid_modifier_list(Modifiers) of
 	true ->
 	    %% now() converted to an integer
-	    %% fullfill the requirements of
+	    %% fulfill the requirements of
 	    %% all modifiers: unique, positive,
 	    %% and monotonic...
 	    {MS, S, US} = erlang:now(),
diff --git a/erts/include/internal/ethr_atomics.h b/erts/include/internal/ethr_atomics.h
index 06568201ad..65381fc558 100644
--- a/erts/include/internal/ethr_atomics.h
+++ b/erts/include/internal/ethr_atomics.h
@@ -62,7 +62,7 @@
  * - read
  * - init
  *
- * Appart from a function implementing the atomic operation
+ * Apart from a function implementing the atomic operation
  * with unspecified memory barrier semantics, there are
  * functions implementing each operation with the following
  * implied memory barrier semantics:
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index 300068b952..fee3b013e8 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -182,7 +182,7 @@
 /* Define if you use a gcc that supports the double word cmpxchg instruction */
 #undef ETHR_GCC_HAVE_DW_CMPXCHG_ASM_SUPPORT
 
-/* Define if gcc wont let you clobber ebx with cmpxchg8b and position
+/* Define if gcc won't let you clobber ebx with cmpxchg8b and position
    independent code */
 #undef ETHR_CMPXCHG8B_PIC_NO_CLOBBER_EBX
 
diff --git a/erts/include/internal/gcc/ethr_dw_atomic.h b/erts/include/internal/gcc/ethr_dw_atomic.h
index d661bf33fb..eb597d5a4d 100644
--- a/erts/include/internal/gcc/ethr_dw_atomic.h
+++ b/erts/include/internal/gcc/ethr_dw_atomic.h
@@ -72,7 +72,7 @@ typedef volatile ETHR_NATIVE_SU_DW_SINT_T * ethr_native_dw_ptr_t;
  * This code assumes 8 byte aligned memory in 64-bit mode, and 4 byte
  * aligned memory in 32-bit mode. A malloc implementation that does
  * not adhere to these alignment requirements is seriously broken,
- * and we wont bother trying to work around it.
+ * and we won't bother trying to work around it.
  *
  * Since memory alignment may be off by one word we need to align at
  * runtime. We, therefore, need an extra word allocated.
diff --git a/erts/include/internal/gcc/ethr_membar.h b/erts/include/internal/gcc/ethr_membar.h
index aeef8115a3..6763c3475e 100644
--- a/erts/include/internal/gcc/ethr_membar.h
+++ b/erts/include/internal/gcc/ethr_membar.h
@@ -31,9 +31,9 @@
  * ordered around it according to the semantics of the
  * barrier specified.
  *
- * The C11 aproch is different. The __atomic builtins
+ * The C11 approach is different. The __atomic builtins
  * API takes a memory model parameter. Assuming that all
- * memory syncronizations using the involved atomic
+ * memory synchronizations using the involved atomic
  * variables are made using this API, the synchronizations
  * will adhere to the memory models used. That is, you do
  * *not* know how loads and stores will be ordered around
@@ -69,7 +69,7 @@
  * Why is this? Since all synchronizations is expected
  * to be made using the __atomic builtins, memory
  * barriers only have to be issued by some of them,
- * and you do not know which ones wont issue memory
+ * and you do not know which ones won't issue memory
  * barriers.
  *
  * One can easily be fooled into believing that when
@@ -93,8 +93,8 @@
  *    __ATOMIC_ACQUIRE and __ATOMIC_RELEASE memory
  *    models. This since an __atomic builtin memory
  *    access using the __ATOMIC_ACQUIRE must at least
- *    issue an aquire memory barrier and an __atomic
- *    builtin memory acess with the __ATOMIC_RELEASE
+ *    issue an acquire memory barrier and an __atomic
+ *    builtin memory access with the __ATOMIC_RELEASE
  *    memory model must at least issue a release memory
  *    barrier. Otherwise the two cannot be paired.
  * 4. All __atomic builtins accessing memory using the
@@ -240,7 +240,7 @@ ethr_full_fence__(void)
 #endif
 
 /*
- * Define ETHR_READ_DEPEND_MEMORY_BARRIER for all architechtures
+ * Define ETHR_READ_DEPEND_MEMORY_BARRIER for all architectures
  * not known to order data dependent loads
  *
  * This is a bit too conservative, but better safe than sorry...
diff --git a/erts/include/internal/i386/ethr_dw_atomic.h b/erts/include/internal/i386/ethr_dw_atomic.h
index e3dbc82518..94e9bad2ff 100644
--- a/erts/include/internal/i386/ethr_dw_atomic.h
+++ b/erts/include/internal/i386/ethr_dw_atomic.h
@@ -67,7 +67,7 @@ typedef volatile ethr_native_sint128_t__ * ethr_native_dw_ptr_t;
  * This code assumes 8 byte aligned memory in 64-bit mode, and 4 byte
  * aligned memory in 32-bit mode. A malloc implementation that does
  * not adhere to these alignment requirements is seriously broken,
- * and we wont bother trying to work around it.
+ * and we won't bother trying to work around it.
  *
  * Since memory alignment may be off by one word we need to align at
  * runtime. We, therefore, need an extra word allocated.
@@ -159,7 +159,7 @@ ethr_native_dw_atomic_cmpxchg_mb(ethr_native_dw_atomic_t *var,
 
 #if ETHR_NO_CLOBBER_EBX__ && ETHR_CMPXCHG8B_REGISTER_SHORTAGE
 
 /*
- * gcc wont let us use ebx as input and we
+ * gcc won't let us use ebx as input and we
  * get a register shortage
  */
@@ -176,7 +176,7 @@ ethr_native_dw_atomic_cmpxchg_mb(ethr_native_dw_atomic_t *var,
 
 #elif ETHR_NO_CLOBBER_EBX__
 
 /*
- * gcc wont let us use ebx as input
+ * gcc won't let us use ebx as input
  */
 __asm__ __volatile__(
diff --git a/erts/include/internal/win/ethr_dw_atomic.h b/erts/include/internal/win/ethr_dw_atomic.h
index 03311f6025..d04d2e28ca 100644
--- a/erts/include/internal/win/ethr_dw_atomic.h
+++ b/erts/include/internal/win/ethr_dw_atomic.h
@@ -74,7 +74,7 @@ typedef volatile __int64 * ethr_native_dw_ptr_t;
  * This code assumes 8 byte aligned memory in 64-bit mode, and 4 byte
  * aligned memory in 32-bit mode. A malloc implementation that does
  * not adhere to these alignment requirements is seriously broken,
- * and we wont bother trying to work around it.
+ * and we won't bother trying to work around it.
  *
  * Since memory alignment may be off by one word we need to align at
  * runtime. We, therefore, need an extra word allocated.
-- 
2.31.1
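
The comment the patch touches in gcc/ethr_dw_atomic.h, i386/ethr_dw_atomic.h and win/ethr_dw_atomic.h refers to over-allocating by one word and aligning the pointer at runtime. The sketch below illustrates that general technique only; it is not the ethread implementation, and every name in it is made up for illustration.

/* Sketch of the "allocate an extra word, align at runtime" technique the
 * ethr_dw_atomic.h comments describe.  Illustrative only; not ethread code. */
#include <stdint.h>
#include <stdlib.h>

#define DW_ALIGN (2 * sizeof(void *))          /* double-word alignment target */

/* Returns a double-word aligned pointer into an over-allocated block;
 * *raw_out receives the pointer that must later be passed to free(). */
static void *alloc_dw_aligned(size_t size, void **raw_out)
{
    void *raw = malloc(size + DW_ALIGN - 1);   /* extra room for the shift */
    if (raw == NULL)
        return NULL;
    *raw_out = raw;
    uintptr_t p = (uintptr_t)raw;
    p = (p + DW_ALIGN - 1) & ~(uintptr_t)(DW_ALIGN - 1);   /* round up */
    return (void *)p;
}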
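
The ethr_membar.h comment (point 3 in its list) concerns pairing an __ATOMIC_ACQUIRE load with an __ATOMIC_RELEASE store through the GCC __atomic builtins. A minimal sketch of that pairing, assuming GCC or Clang and standing apart from the ethread code:

/* Minimal acquire/release pairing with the GCC __atomic builtins, as the
 * ethr_membar.h comment describes.  Illustrative only; not ethread code. */
static int payload;        /* ordinary data published by the writer */
static int ready;          /* synchronization flag */

static void writer(void)
{
    payload = 42;                                    /* plain store      */
    __atomic_store_n(&ready, 1, __ATOMIC_RELEASE);   /* publish the flag */
}

static int reader(void)
{
    if (__atomic_load_n(&ready, __ATOMIC_ACQUIRE))   /* pairs with release */
        return payload;    /* sees 42 once the flag load returns 1 */
    return -1;             /* not published yet */
}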
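
The cmpxchg8b and double-word cmpxchg remarks in ethread_header_config.h.in and i386/ethr_dw_atomic.h are about a two-word compare-and-swap. As a rough portable counterpart (not what ethread does, which is hand-written inline assembly), a 64-bit compare-exchange through the __atomic builtins might look like the sketch below; whether the compiler emits cmpxchg8b inline or falls back to libatomic depends on the target and flags.

/* A 64-bit (double word on 32-bit x86) compare-and-swap via the GCC
 * __atomic builtins.  Rough illustration only; the ethread headers use
 * hand-written cmpxchg8b/cmpxchg16b assembly instead. */
#include <stdint.h>

static int dw_cas(uint64_t *var, uint64_t *expected, uint64_t desired)
{
    /* Strong compare-exchange with sequentially consistent ordering;
     * returns nonzero on success, and updates *expected on failure. */
    return __atomic_compare_exchange_n(var, expected, desired,
                                       0 /* strong */,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}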