SUSE:SLE-12:Update / xen / 5a6b36cd-5-x86-use-SPEC_CTRL-on-entry.patch
File 5a6b36cd-5-x86-use-SPEC_CTRL-on-entry.patch of Package xen
# Commit 5e7962901131186d3514528ed57c7a9901a15a3e
# Date 2018-01-26 14:10:21 +0000
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/entry: Organise the use of MSR_SPEC_CTRL at each entry/exit point

We need to be able to either set or clear IBRS in Xen context, as well as
restore appropriate guest values in guest context.  See the documentation in
asm-x86/spec_ctrl_asm.h for details.

With the contemporary microcode, writes to %cr3 are slower when SPEC_CTRL.IBRS
is set.  Therefore, the positioning of SPEC_CTRL_{ENTRY/EXIT}* is important.

Ideally, the IBRS_SET/IBRS_CLEAR hunks might be positioned either side of the
%cr3 change, but that is rather more complicated to arrange, and could still
result in a guest controlled value in SPEC_CTRL during the %cr3 change,
negating the saving if the guest chose to have IBRS set.

Therefore, we optimise for the pre-Skylake case (being far more common in the
field than Skylake and later, at the moment), where we have a Xen-preferred
value of IBRS clear when switching %cr3.

There is a semi-unrelated bugfix, where various asm_defn.h macros have a
hidden dependency on PAGE_SIZE, which results in an assembler error if used
in a .macro definition.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -79,6 +79,11 @@ UNLIKELY_END(svm_trace)
         or   $X86_EFLAGS_MBS,%rax
         mov  %rax,VMCB_rflags(%rcx)
 
+        mov VCPU_arch_spec_ctrl(%rbx), %eax
+
+        /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+
         pop  %r15
         pop  %r14
         pop  %r13
@@ -101,8 +106,11 @@ UNLIKELY_END(svm_trace)
         SAVE_ALL
 
         GET_CURRENT(%rbx)
-        mov  VCPU_svm_vmcb(%rbx),%rcx
 
+        SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
+        mov  VCPU_svm_vmcb(%rbx),%rcx
         movb $0,VCPU_svm_vmcb_in_sync(%rbx)
         mov  VMCB_rax(%rcx),%rax
         mov  %rax,UREGS_rax(%rsp)
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -38,6 +38,9 @@ ENTRY(vmx_asm_vmexit_handler)
         movb $1,VCPU_vmx_launched(%rbx)
         mov  %rax,VCPU_hvm_guest_cr2(%rbx)
 
+        SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
         mov  %rsp,%rdi
         call vmx_vmexit_handler
 
@@ -66,6 +69,12 @@ UNLIKELY_END(realmode)
 
         mov  %rsp,%rdi
         call vmx_vmenter_helper
+
+        mov VCPU_arch_spec_ctrl(%rbx), %eax
+
+        /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+
         mov  VCPU_hvm_guest_cr2(%rbx),%rax
 
         pop  %r15
@@ -97,6 +106,15 @@ UNLIKELY_END(realmode)
 
 .Lvmx_vmentry_fail:
         sti
         SAVE_ALL
+
+        /*
+         * PV variant needed here as no guest code has executed (so
+         * MSR_SPEC_CTRL can't have changed value), and NMIs/MCEs are liable
+         * to hit (in which case the HVM variant might corrupt things).
+         */
+        SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
         call vmx_vmentry_failure
         ud2
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -625,6 +625,7 @@ void __init __start_xen(unsigned long mb
     if ( cpu_has_efer )
         rdmsrl(MSR_EFER, this_cpu(efer));
     asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );
+    init_shadow_spec_ctrl_state();
 
     smp_prepare_boot_cpu();
 
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -41,6 +41,7 @@
 #include <asm/flushtlb.h>
 #include <asm/msr.h>
 #include <asm/mtrr.h>
+#include <asm/spec_ctrl.h>
 #include <asm/time.h>
 #include <asm/tboot.h>
 #include <mach_apic.h>
@@ -329,6 +330,7 @@ void start_secondary(void *unused)
     if ( cpu_has_efer )
         rdmsrl(MSR_EFER, this_cpu(efer));
     asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );
+    init_shadow_spec_ctrl_state();
 
     /*
      * Just as during early bootstrap, it is convenient here to disable
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -86,6 +86,7 @@ void __dummy__(void)
     OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv_vcpu.kernel_sp);
     OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv_vcpu.kernel_ss);
     OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
+    OFFSET(VCPU_arch_spec_ctrl, struct vcpu, arch.spec_ctrl);
     OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
     OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
     OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
@@ -136,6 +137,9 @@ void __dummy__(void)
     OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
     OFFSET(CPUINFO_xen_cr3, struct cpu_info, xen_cr3);
     OFFSET(CPUINFO_pv_cr3, struct cpu_info, pv_cr3);
+    OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl);
+    OFFSET(CPUINFO_use_shadow_spec_ctrl, struct cpu_info, use_shadow_spec_ctrl);
+    OFFSET(CPUINFO_xen_ibrs, struct cpu_info, xen_ibrs);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
     BLANK();
 
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -17,6 +17,9 @@ ENTRY(compat_hypercall)
         movl  $TRAP_syscall, 4(%rsp)
         SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
 
+        SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
         cmpb  $0,untrusted_msi(%rip)
 UNLIKELY_START(ne, msi_check)
         movl  $HYPERCALL_VECTOR,%edi
@@ -177,6 +180,12 @@ ENTRY(compat_restore_all_guest)
 
         and   UREGS_eflags(%rsp),%r11d
         or    $X86_EFLAGS_IF,%r11
         mov   %r11d,UREGS_eflags(%rsp)
+
+        mov VCPU_arch_spec_ctrl(%rbx), %eax
+
+        /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -38,6 +38,9 @@ switch_to_kernel:
 restore_all_guest:
         ASSERT_INTERRUPTS_DISABLED
 
+        /* Stash guest SPEC_CTRL value while we can read struct vcpu. */
+        mov VCPU_arch_spec_ctrl(%rbx), %r15d
+
         /* Copy guest mappings and switch to per-CPU root page table. */
         mov   %cr3, %r9
         GET_STACK_BASE(%rdx)
@@ -65,6 +68,12 @@ restore_all_guest:
         write_cr3 rax, rdi, rsi
 .Lrag_keep_cr3:
 
+        /* Restore stashed SPEC_CTRL value. */
+        mov %r15d, %eax
+
+        /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+        SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+
         RESTORE_ALL
         testw $TRAP_syscall,4(%rsp)
         jz    iret_exit_to_guest
@@ -133,9 +142,9 @@ restore_all_xen:
          * Check whether we need to switch to the per-CPU page tables, in
          * case we return to late PV exit code (from an NMI or #MC).
          */
-        GET_STACK_BASE(%rax)
-        mov   STACK_CPUINFO_FIELD(xen_cr3)(%rax), %rdx
-        mov   STACK_CPUINFO_FIELD(pv_cr3)(%rax), %rax
+        GET_STACK_BASE(%rbx)
+        mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rdx
+        mov   STACK_CPUINFO_FIELD(pv_cr3)(%rbx), %rax
         test  %rdx, %rdx
         /*
          * Ideally the condition would be "nsz", but such doesn't exist,
@@ -145,6 +154,9 @@ UNLIKELY_START(g, exit_cr3)
         write_cr3 rax, rdi, rsi
 UNLIKELY_END(exit_cr3)
 
+        /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+        SPEC_CTRL_EXIT_TO_XEN /* Req: %rbx=end, Clob: acd */
+
         RESTORE_ALL adj=8
         iretq
@@ -173,6 +185,9 @@ ENTRY(syscall_enter)
         movl  $TRAP_syscall, 4(%rsp)
         SAVE_ALL
 
+        SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
         GET_STACK_BASE(%rbx)
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
         neg   %rcx
@@ -336,6 +351,9 @@ GLOBAL(sysenter_eflags_saved)
         movl  $TRAP_syscall, 4(%rsp)
         SAVE_ALL
 
+        SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
         GET_STACK_BASE(%rbx)
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
         neg   %rcx
@@ -381,6 +399,9 @@ ENTRY(int80_direct_trap)
         movl  $0x80, 4(%rsp)
         SAVE_ALL
 
+        SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
         GET_STACK_BASE(%rbx)
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
         neg   %rcx
@@ -547,6 +568,10 @@ ENTRY(common_interrupt)
         SAVE_ALL
 
         GET_STACK_BASE(%r14)
+
+        SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
         mov   %rcx, %r15
         neg   %rcx
@@ -584,6 +609,10 @@ GLOBAL(handle_exception)
         SAVE_ALL
 
         GET_STACK_BASE(%r14)
+
+        SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
         mov   %rcx, %r15
         neg   %rcx
@@ -732,8 +761,12 @@ ENTRY(double_fault)
         movl  $TRAP_double_fault,4(%rsp)
         SAVE_ALL
 
-        GET_STACK_BASE(%rbx)
-        mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rbx
+        GET_STACK_BASE(%r14)
+
+        SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
+        mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rbx
         test  %rbx, %rbx
         jz    .Ldblf_cr3_okay
         jns   .Ldblf_cr3_load
@@ -761,6 +794,10 @@ handle_ist_exception:
         SAVE_ALL
 
         GET_STACK_BASE(%r14)
+
+        SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs, %r14=end, Clob: acd */
+        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
         mov   %rcx, %r15
         neg   %rcx
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -6,6 +6,7 @@
 /* NB. Auto-generated from arch/.../asm-offsets.c */
 #include <asm/asm-offsets.h>
 #endif
+#include <asm/page.h>
 #include <asm/processor.h>
 
 #ifdef __ASSEMBLY__
@@ -123,4 +124,6 @@ void ret_from_intr(void);
 
 #endif
 
+#include <asm/spec_ctrl_asm.h>
+
 #endif /* __X86_ASM_DEFNS_H__ */
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -31,8 +31,13 @@ struct cpu_info {
      */
     unsigned long xen_cr3;
    unsigned long pv_cr3;
+
+    /* See asm-x86/spec_ctrl_asm.h for usage. */
+    unsigned int shadow_spec_ctrl;
+    bool_t use_shadow_spec_ctrl;
+    int8_t xen_ibrs;
+
     /* get_stack_bottom() must be 16-byte aligned */
-    unsigned long __pad_for_stack_bottom;
 };
 
 static inline struct cpu_info *get_cpu_info(void)
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -20,8 +20,18 @@
 #ifndef __X86_SPEC_CTRL_H__
 #define __X86_SPEC_CTRL_H__
 
+#include <asm/current.h>
+
 void init_speculation_mitigations(void);
 
+static inline void init_shadow_spec_ctrl_state(void)
+{
+    struct cpu_info *info = get_cpu_info();
+
+    info->shadow_spec_ctrl = info->use_shadow_spec_ctrl = 0;
+    info->xen_ibrs = -1;
+}
+
 #endif /* !__X86_SPEC_CTRL_H__ */
 
 /*
--- /dev/null
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -0,0 +1,225 @@
+/******************************************************************************
+ * include/asm-x86/spec_ctrl.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (c) 2017-2018 Citrix Systems Ltd.
+ */
+
+#ifndef __X86_SPEC_CTRL_ASM_H__
+#define __X86_SPEC_CTRL_ASM_H__
+
+#ifdef __ASSEMBLY__
+#include <asm/msr-index.h>
+
+/*
+ * Saving and restoring MSR_SPEC_CTRL state is a little tricky.
+ *
+ * We want the guests choice of SPEC_CTRL while in guest context, and Xen's
+ * choice (set or clear, depending on the hardware) while running in Xen
+ * context.  Therefore, a simplistic algorithm is:
+ *
+ *  - Set/clear IBRS on entry to Xen
+ *  - Set the guests' choice on exit to guest
+ *  - Leave SPEC_CTRL unchanged on exit to xen
+ *
+ * There are two complicating factors:
+ *  1) HVM guests can have direct access to the MSR, so it can change
+ *     behind Xen's back.
+ *  2) An NMI or MCE can interrupt at any point, including early in the entry
+ *     path, or late in the exit path after restoring the guest value.  This
+ *     will corrupt the guest value.
+ *
+ * Factor 1 is dealt with by relying on NMIs/MCEs being blocked immediately
+ * after VMEXIT.  The VMEXIT-specific code reads MSR_SPEC_CTRL and updates
+ * current before loading Xen's MSR_SPEC_CTRL setting.
+ *
+ * Factor 2 is harder.  We maintain a shadow_spec_ctrl value, and
+ * use_shadow_spec_ctrl boolean per cpu.  The synchronous use is:
+ *
+ *  1) Store guest value in shadow_spec_ctrl
+ *  2) Set use_shadow_spec_ctrl boolean
+ *  3) Load guest value into MSR_SPEC_CTRL
+ *  4) Exit to guest
+ *  5) Entry from guest
+ *  6) Clear use_shadow_spec_ctrl boolean
+ *  7) Load Xen's value into MSR_SPEC_CTRL
+ *
+ * The asynchronous use for interrupts/exceptions is:
+ *  - Set/clear IBRS on entry to Xen
+ *  - On exit to Xen, check use_shadow_spec_ctrl
+ *  - If set, load shadow_spec_ctrl
+ *
+ * Therefore, an interrupt/exception which hits the synchronous path between
+ * steps 2 and 6 will restore the shadow value rather than leaving Xen's value
+ * loaded and corrupting the value used in guest context.
+ *
+ * The following ASM fragments implement this algorithm.  See their local
+ * comments for further details.
+ *  - SPEC_CTRL_ENTRY_FROM_VMEXIT
+ *  - SPEC_CTRL_ENTRY_FROM_PV
+ *  - SPEC_CTRL_ENTRY_FROM_INTR
+ *  - SPEC_CTRL_EXIT_TO_XEN
+ *  - SPEC_CTRL_EXIT_TO_GUEST
+ */
+
+.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
+/*
+ * Requires %rbx=current, %rsp=regs/cpuinfo
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * The common case is that a guest has direct access to MSR_SPEC_CTRL, at
+ * which point we need to save the guest value before setting IBRS for Xen.
+ * Unilaterally saving the guest value is shorter and faster than checking.
+ */
+    cmpb $0, CPUINFO_xen_ibrs(%rsp)
+    jl .Lspec_entry_done\@
+
+    mov $MSR_SPEC_CTRL, %ecx
+    rdmsr
+
+    /* Stash the value from hardware. */
+    mov %eax, VCPU_arch_spec_ctrl(%rbx)
+    xor %edx, %edx
+
+    /* Clear SPEC_CTRL shadowing *before* loading Xen's value. */
+    movb %dl, CPUINFO_use_shadow_spec_ctrl(%rsp)
+
+    /* Load Xen's intended value. */
+    movzbl CPUINFO_xen_ibrs(%rsp), %eax
+    wrmsr
+.Lspec_entry_done\@:
+.endm
+
+.macro DO_SPEC_CTRL_ENTRY maybexen:req
+/*
+ * Requires %rsp=regs (also cpuinfo if !maybexen)
+ * Requires %r14=stack_end (if maybexen)
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * PV guests can't update MSR_SPEC_CTRL behind Xen's back, so no need to read
+ * it back.  Entries from guest context need to clear SPEC_CTRL shadowing,
+ * while entries from Xen must leave shadowing in its current state.
+ */
+    xor %edx, %edx
+
+    .if \maybexen
+        cmp %dl, STACK_CPUINFO_FIELD(xen_ibrs)(%r14)
+    .else
+        cmp %dl, CPUINFO_xen_ibrs(%rsp)
+    .endif
+    jl .Lspec_entry_done\@
+
+    mov $MSR_SPEC_CTRL, %ecx
+
+    /*
+     * Clear SPEC_CTRL shadowing *before* loading Xen's value.  If entering
+     * from a possibly-xen context, %rsp doesn't necessarily alias the cpuinfo
+     * block so calculate the position directly.
+     */
+    .if \maybexen
+        /* Branchless `if ( !xen ) clear_shadowing` */
+        testb $3, UREGS_cs(%rsp)
+        setz %al
+        and %al, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%r14)
+    .else
+        movb %dl, CPUINFO_use_shadow_spec_ctrl(%rsp)
+    .endif
+
+    /* Load Xen's intended value. */
+    .if \maybexen
+        movzbl STACK_CPUINFO_FIELD(xen_ibrs)(%r14), %eax
+    .else
+        movzbl CPUINFO_xen_ibrs(%rsp), %eax
+    .endif
+    wrmsr
+.Lspec_entry_done\@:
+.endm
+
+.macro DO_SPEC_CTRL_EXIT_TO_XEN
+/*
+ * Requires %rbx=stack_end
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * When returning to Xen context, look to see whether SPEC_CTRL shadowing is
+ * in effect, and reload the shadow value.  This covers race conditions which
+ * exist with an NMI/MCE/etc hitting late in the return-to-guest path.
+ */
+    xor %edx, %edx
+
+    cmp %dl, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%rbx)
+    je .L\@_skip
+
+    mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%rbx), %eax
+    mov $MSR_SPEC_CTRL, %ecx
+    wrmsr
+
+.L\@_skip:
+.endm
+
+.macro DO_SPEC_CTRL_EXIT_TO_GUEST
+/*
+ * Requires %eax=spec_ctrl, %rsp=regs/cpuinfo
+ * Clobbers %rcx, %rdx
+ *
+ * When returning to guest context, set up SPEC_CTRL shadowing and load the
+ * guest value.
+ */
+    xor %edx, %edx
+    cmp %dl, CPUINFO_xen_ibrs(%rsp)
+    jl .Lspec_exit_done\@
+
+    /* Set up shadow value *before* enabling shadowing. */
+    mov %eax, CPUINFO_shadow_spec_ctrl(%rsp)
+
+    /* Set SPEC_CTRL shadowing *before* loading the guest value. */
+    movb $1, CPUINFO_use_shadow_spec_ctrl(%rsp)
+
+    mov $MSR_SPEC_CTRL, %ecx
+    wrmsr
+.Lspec_exit_done\@:
+.endm
+
+/* Use after a VMEXIT from an HVM guest. */
+#define SPEC_CTRL_ENTRY_FROM_VMEXIT             \
+    DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
+
+/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
+#define SPEC_CTRL_ENTRY_FROM_PV                 \
+    DO_SPEC_CTRL_ENTRY maybexen=0
+
+/* Use in interrupt/exception context.  May interrupt Xen or PV context. */
+#define SPEC_CTRL_ENTRY_FROM_INTR               \
+    DO_SPEC_CTRL_ENTRY maybexen=1
+
+/* Use when exiting to Xen context. */
+#define SPEC_CTRL_EXIT_TO_XEN                   \
+    DO_SPEC_CTRL_EXIT_TO_XEN
+
+/* Use when exiting to guest context. */
+#define SPEC_CTRL_EXIT_TO_GUEST                 \
+    DO_SPEC_CTRL_EXIT_TO_GUEST
+
+#endif /* __ASSEMBLY__ */
+#endif /* !__X86_SPEC_CTRL_ASM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
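
To make the seven-step shadowing protocol described in spec_ctrl_asm.h easier to follow, here is a minimal C sketch of the same state machine. It is illustrative only: the names used below (cpu_info_model, wrmsr_spec_ctrl, exit_to_guest, entry_from_guest, nmi_exit_to_xen) are hypothetical and do not appear in the patch, and the real logic runs in assembly on the entry/exit paths with the per-CPU fields added to struct cpu_info.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the new per-CPU fields added to struct cpu_info by the patch. */
struct cpu_info_model {
    uint32_t shadow_spec_ctrl;      /* guest value to re-load while shadowing */
    bool     use_shadow_spec_ctrl;  /* is the shadow value authoritative?     */
    int8_t   xen_ibrs;              /* Xen's intended value, or -1 if unused  */
};

/* Hypothetical stand-in for the wrmsr to MSR_SPEC_CTRL done by the asm macros. */
static void wrmsr_spec_ctrl(uint32_t val)
{
    printf("MSR_SPEC_CTRL <- %u\n", val);
}

/* Steps 1-3: what SPEC_CTRL_EXIT_TO_GUEST does. */
static void exit_to_guest(struct cpu_info_model *c, uint32_t guest_val)
{
    if ( c->xen_ibrs < 0 )
        return;                         /* mitigation not in use */
    c->shadow_spec_ctrl = guest_val;    /* 1) stash the shadow value first */
    c->use_shadow_spec_ctrl = true;     /* 2) then enable shadowing        */
    wrmsr_spec_ctrl(guest_val);         /* 3) finally load the guest value */
}

/* Steps 6-7: what SPEC_CTRL_ENTRY_FROM_PV does on entry from guest context. */
static void entry_from_guest(struct cpu_info_model *c)
{
    if ( c->xen_ibrs < 0 )
        return;
    c->use_shadow_spec_ctrl = false;        /* 6) stop shadowing before ...  */
    wrmsr_spec_ctrl((uint32_t)c->xen_ibrs); /* 7) ... loading Xen's value    */
}

/* Asynchronous path: what SPEC_CTRL_EXIT_TO_XEN does when an NMI/MCE returns. */
static void nmi_exit_to_xen(const struct cpu_info_model *c)
{
    if ( c->use_shadow_spec_ctrl )
        wrmsr_spec_ctrl(c->shadow_spec_ctrl);  /* re-load the guest's value */
}

int main(void)
{
    struct cpu_info_model cpu = { .xen_ibrs = 1 };  /* Xen wants IBRS set */

    exit_to_guest(&cpu, 0);    /* steps 1-3: guest is given SPEC_CTRL = 0      */
    nmi_exit_to_xen(&cpu);     /* NMI after step 3: shadow value is re-loaded  */
    entry_from_guest(&cpu);    /* steps 6-7: back in Xen with IBRS set         */
    return 0;
}

The ordering is the point of the sketch: the shadow value is written before use_shadow_spec_ctrl is set, and shadowing is cleared before Xen's value is loaded, so an NMI or MCE landing anywhere between steps 2 and 6 always finds a consistent pair and restores the guest's value rather than leaving Xen's value to leak into guest context.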