This repository has been archived by the owner on May 29, 2024. It is now read-only.

Commit

[GR-46278] Merge in jdk-17.0.8+7 (JVMCI_22.3).
PullRequest: labsjdk-ce-17/116
marwan-hallaoui committed Jul 19, 2023
2 parents 91f60dd + a5cd56d commit 859a935
Showing 29 changed files with 761 additions and 152 deletions.
2 changes: 1 addition & 1 deletion make/conf/version-numbers.conf
@@ -39,4 +39,4 @@ DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="16 17"
DEFAULT_JDK_SOURCE_TARGET_VERSION=17
DEFAULT_PROMOTED_VERSION_PRE=ea
DEFAULT_PROMOTED_VERSION_PRE=
47 changes: 33 additions & 14 deletions src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
@@ -2964,6 +2964,22 @@ class StubGenerator: public StubCodeGenerator {
return start;
}

// Big-endian 128-bit + 64-bit -> 128-bit addition.
// Inputs: 128-bits. in is preserved.
// The least-significant 64-bit word is in the upper dword of each vector.
// inc (the 64-bit increment) is preserved. Its lower dword must be zero.
// Output: result
void be_add_128_64(FloatRegister result, FloatRegister in,
FloatRegister inc, FloatRegister tmp) {
assert_different_registers(result, tmp, inc);

__ addv(result, __ T2D, in, inc); // Add inc to the least-significant dword of input
__ cmhi(tmp, __ T2D, inc, result); // Check for result overflowing
__ ins(tmp, __ D, tmp, 0, 1); // Move LSD of comparison result to MSD
__ ins(tmp, __ D, inc, 1, 0); // Move 0 to LSD of comparison result
__ subv(result, __ T2D, result, tmp); // Subtract -1 from MSD if there was an overflow
}
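
For reference, the carry logic above reduces to the following scalar sketch (an editorial illustration with a hypothetical helper name, not code from this commit): cmhi yields an all-ones lane exactly when the low-half addition wrapped, and subtracting that -1 from the most-significant dword is the carry.

#include <cstdint>

// Scalar model of be_add_128_64: hi/lo are the two 64-bit halves of the
// 128-bit counter (already byte-reversed to native order), inc is the
// 64-bit increment. An unsigned wrap of the low half carries into hi.
static void be_add_128_64_model(uint64_t &hi, uint64_t &lo, uint64_t inc) {
  lo += inc;
  if (lo < inc) {  // lo wrapped, i.e. cmhi (inc > result) fired
    hi += 1;       // the subv of an all-ones (-1) lane
  }
}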

// CTR AES crypt.
// Arguments:
//
@@ -3073,13 +3089,16 @@ class StubGenerator: public StubCodeGenerator {
// Setup the counter
__ movi(v4, __ T4S, 0);
__ movi(v5, __ T4S, 1);
__ ins(v4, __ S, v5, 3, 3); // v4 contains { 0, 0, 0, 1 }
__ ins(v4, __ S, v5, 2, 2); // v4 contains { 0, 1 }

__ ld1(v0, __ T16B, counter); // Load the counter into v0
__ rev32(v16, __ T16B, v0);
__ addv(v16, __ T4S, v16, v4);
__ rev32(v16, __ T16B, v16);
__ st1(v16, __ T16B, counter); // Save the incremented counter back
// 128-bit big-endian increment
__ ld1(v0, __ T16B, counter);
__ rev64(v16, __ T16B, v0);
be_add_128_64(v16, v16, v4, /*tmp*/v5);
__ rev64(v16, __ T16B, v16);
__ st1(v16, __ T16B, counter);
// Previous counter value is in v0
// v4 contains { 0, 1 }
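
The replaced rev32/addv(T4S) sequence only byte-swapped and carried within 32-bit lanes, so the counter wrapped at 2^32; the rev64/be_add_128_64 form makes this a true 128-bit big-endian increment. A scalar sketch of the round trip (hypothetical helper; assumes GCC/Clang __builtin_bswap64):

#include <cstdint>
#include <cstring>

// Model of ld1 -> rev64 -> be_add_128_64 -> rev64 -> st1 on the in-memory
// 16-byte big-endian counter.
static void ctr128_be_increment(uint8_t counter[16], uint64_t inc) {
  uint64_t hi, lo;
  std::memcpy(&hi, counter, 8);      // most-significant half
  std::memcpy(&lo, counter + 8, 8);  // least-significant half
  hi = __builtin_bswap64(hi);        // the rev64 step
  lo = __builtin_bswap64(lo);
  lo += inc;
  if (lo < inc) hi += 1;             // be_add_128_64's carry
  hi = __builtin_bswap64(hi);
  lo = __builtin_bswap64(lo);
  std::memcpy(counter, &hi, 8);      // the st1 step
  std::memcpy(counter + 8, &lo, 8);
}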

{
// We have fewer than bulk_width blocks of data left. Encrypt
@@ -3111,9 +3130,9 @@ class StubGenerator: public StubCodeGenerator {

// Increment the counter, store it back
__ orr(v0, __ T16B, v16, v16);
__ rev32(v16, __ T16B, v16);
__ addv(v16, __ T4S, v16, v4);
__ rev32(v16, __ T16B, v16);
__ rev64(v16, __ T16B, v16);
be_add_128_64(v16, v16, v4, /*tmp*/v5);
__ rev64(v16, __ T16B, v16);
__ st1(v16, __ T16B, counter); // Save the incremented counter back

__ b(inner_loop);
@@ -3161,7 +3180,7 @@ class StubGenerator: public StubCodeGenerator {
// Keys should already be loaded into the correct registers

__ ld1(v0, __ T16B, counter); // v0 contains the first counter
__ rev32(v16, __ T16B, v0); // v16 contains byte-reversed counter
__ rev64(v16, __ T16B, v0); // v16 contains byte-reversed counter

// AES/CTR loop
{
@@ -3171,11 +3190,11 @@ class StubGenerator: public StubCodeGenerator {
// Setup the counters
__ movi(v8, __ T4S, 0);
__ movi(v9, __ T4S, 1);
__ ins(v8, __ S, v9, 3, 3); // v8 contains { 0, 0, 0, 1 }
__ ins(v8, __ S, v9, 2, 2); // v8 contains { 0, 1 }

for (FloatRegister f = v0; f < v0 + bulk_width; f++) {
__ rev32(f, __ T16B, v16);
__ addv(v16, __ T4S, v16, v8);
__ rev64(f, __ T16B, v16);
be_add_128_64(v16, v16, v8, /*tmp*/v9);
}

__ ld1(v8, v9, v10, v11, __ T16B, __ post(in, 4 * 16));
@@ -3203,7 +3222,7 @@ class StubGenerator: public StubCodeGenerator {
}

// Save the counter back where it goes
__ rev32(v16, __ T16B, v16);
__ rev64(v16, __ T16B, v16);
__ st1(v16, __ T16B, counter);

__ pop(saved_regs, sp);
31 changes: 31 additions & 0 deletions src/hotspot/cpu/x86/assembler_x86.cpp
@@ -2633,6 +2633,16 @@ void Assembler::ktestd(KRegister src1, KRegister src2) {
emit_int16((unsigned char)0x99, (0xC0 | encode));
}


void Assembler::kshiftlbl(KRegister dst, KRegister src, int imm8) {
assert(VM_Version::supports_avx512dq(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), 0 , src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int16(0x32, (0xC0 | encode));
emit_int8(imm8);
}
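
KSHIFTLB shifts an 8-bit opmask register left by an immediate, zero-filling from the right; a scalar sketch of the semantics this encoder relies on:

#include <cstdint>

// Model of KSHIFTLB: shift counts above 7 clear the mask entirely.
static inline uint8_t kshiftlb_model(uint8_t k, unsigned imm8) {
  return imm8 > 7 ? uint8_t(0) : uint8_t(k << imm8);
}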


void Assembler::movb(Address dst, int imm8) {
InstructionMark im(this);
prefix(dst);
@@ -3948,6 +3958,14 @@ void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
emit_int24(0x3E, (0xC0 | encode), vcc);
}

void Assembler::evpcmpuq(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
assert(VM_Version::supports_avx512vlbw(), "");
InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int24(0x1E, (0xC0 | encode), vcc);
}
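
With the `lt` predicate that ev_add128 passes below, VPCMPUQ performs an unsigned per-qword compare and writes one bit per lane into the destination mask; a scalar sketch:

#include <cstdint>

// Model of evpcmpuq(k, nds, src, lt): k.bit(i) = (nds[i] < src[i]) unsigned.
static uint8_t evpcmpuq_lt_model(const uint64_t nds[], const uint64_t src[],
                                 int lanes) {
  uint8_t k = 0;
  for (int i = 0; i < lanes; i++) {
    if (nds[i] < src[i]) k |= uint8_t(1u << i);
  }
  return k;
}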

void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) {
assert(VM_Version::supports_avx512vlbw(), "");
InstructionMark im(this);
@@ -6574,6 +6592,19 @@ void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
emit_operand(dst, src);
}

void Assembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
assert(VM_Version::supports_evex(), "");
assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
attributes.set_is_evex_instruction();
attributes.set_embedded_opmask_register_specifier(mask);
if (merge) {
attributes.reset_is_clear_context();
}
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int16((unsigned char)0xD4, (0xC0 | encode));
}
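
The `merge` flag selects between EVEX merge-masking (inactive lanes keep the destination's old value) and zeroing-masking (inactive lanes are cleared); a per-lane sketch:

#include <cstdint>

// Model of masked vpaddq: only lanes with a set mask bit receive the sum.
static void masked_vpaddq_model(uint64_t dst[], uint8_t mask,
                                const uint64_t nds[], const uint64_t src[],
                                bool merge, int lanes) {
  for (int i = 0; i < lanes; i++) {
    if (mask & (1u << i)) {
      dst[i] = nds[i] + src[i];
    } else if (!merge) {
      dst[i] = 0;  // zeroing-masking; merge-masking leaves dst[i] alone
    }
  }
}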

void Assembler::psubb(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
8 changes: 8 additions & 0 deletions src/hotspot/cpu/x86/assembler_x86.hpp
@@ -1492,6 +1492,8 @@ class Assembler : public AbstractAssembler {

void ktestql(KRegister dst, KRegister src);

void kshiftlbl(KRegister dst, KRegister src, int imm8);

void movdl(XMMRegister dst, Register src);
void movdl(Register dst, XMMRegister src);
void movdl(XMMRegister dst, Address src);
@@ -1727,6 +1729,8 @@ class Assembler : public AbstractAssembler {
void evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
void evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len);

void evpcmpuq(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);

void pcmpeqw(XMMRegister dst, XMMRegister src);
void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
@@ -2249,6 +2253,10 @@ class Assembler : public AbstractAssembler {
void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
void vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

// Leaf level assembler routines for masked operations.
void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
// void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

// Sub packed integers
void psubb(XMMRegister dst, XMMRegister src);
void psubw(XMMRegister dst, XMMRegister src);
2 changes: 2 additions & 0 deletions src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -964,6 +964,8 @@ class MacroAssembler: public Assembler {
void roundDec(XMMRegister key, int rnum);
void lastroundDec(XMMRegister key, int rnum);
void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);
void ev_add128(XMMRegister xmmdst, XMMRegister xmmsrc1, XMMRegister xmmsrc2,
int vector_len, KRegister ktmp, Register rscratch = noreg);

public:
void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
59 changes: 38 additions & 21 deletions src/hotspot/cpu/x86/macroAssembler_x86_aes.cpp
@@ -779,6 +779,19 @@ void MacroAssembler::avx_ghash(Register input_state, Register htbl,
vpxor(xmm15, xmm15, xmm15, Assembler::AVX_128bit);
}

// Add 128-bit integers in xmmsrc1 to xmmsrc2, then place the result in xmmdst.
// Clobber ktmp and rscratch.
// Used by aesctr_encrypt.
void MacroAssembler::ev_add128(XMMRegister xmmdst, XMMRegister xmmsrc1, XMMRegister xmmsrc2,
int vector_len, KRegister ktmp, Register rscratch) {
vpaddq(xmmdst, xmmsrc1, xmmsrc2, vector_len);
evpcmpuq(ktmp, xmmdst, xmmsrc2, lt, vector_len); // set mask[0/1] bit if addq to dst[0/1] wraps
kshiftlbl(ktmp, ktmp, 1); // mask[1] <- mask[0], mask[0] <- 0, etc

evpaddq(xmmdst, ktmp, xmmdst, xmm17, /*merge*/true,
vector_len); // dst[1]++ if mask[1] set
}
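
Taken together, the three instructions implement four independent 128-bit adds in one 512-bit register; a scalar model (editorial sketch, hypothetical names): xmm17 is preloaded with {0, 1} per 128-bit lane from counter_mask_ones_addr, so after the mask is shifted left by one, the masked add lands a 1 exactly on the high qword of each lane whose low qword wrapped.

#include <cstdint>

// q[8]: eight 64-bit lanes = four little-endian 128-bit counters {low, high}.
static void ev_add128_model(uint64_t q[8], const uint64_t inc[8]) {
  uint8_t mask = 0;
  for (int i = 0; i < 8; i++) {                   // vpaddq + evpcmpuq(lt)
    q[i] += inc[i];
    if (q[i] < inc[i]) mask |= uint8_t(1u << i);  // lane wrapped
  }
  mask = uint8_t(mask << 1);                      // kshiftlbl: carry moves to
                                                  // the adjacent high lane;
                                                  // bit 7 falls off (mod 2^128)
  for (int i = 0; i < 8; i++) {                   // masked evpaddq with xmm17
    if (mask & (1u << i)) q[i] += 1;              // xmm17's odd lanes hold 1
  }
}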

// AES Counter Mode using VAES instructions
void MacroAssembler::aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
Register len_reg, Register used, Register used_addr, Register saved_encCounter_start) {
@@ -831,19 +844,23 @@ void MacroAssembler::aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
//shuffle counter using lbswap_mask
vpshufb(xmm8, xmm8, xmm16, Assembler::AVX_512bit);

// Vector value to propagate carries
evmovdquq(xmm17, ExternalAddress(StubRoutines::x86::counter_mask_ones_addr()), Assembler::AVX_512bit, r15);
// pre-increment and propagate counter values to zmm9-zmm15 registers.
// Linc0 increments the zmm8 by 1 (initial value being 0), Linc4 increments the counters zmm9-zmm15 by 4
// The counter is incremented after each block i.e. 16 bytes is processed;
// each zmm register has 4 counter values as its MSB
// the counters are incremented in parallel
vpaddd(xmm8, xmm8, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 64), Assembler::AVX_512bit, r15);//linc0
vpaddd(xmm9, xmm8, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//linc4(rip)
vpaddd(xmm10, xmm9, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
vpaddd(xmm11, xmm10, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
vpaddd(xmm12, xmm11, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
vpaddd(xmm13, xmm12, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
vpaddd(xmm14, xmm13, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
vpaddd(xmm15, xmm14, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15);//Linc4(rip)
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 64), Assembler::AVX_512bit, r15 /*rscratch*/);//linc0
ev_add128(xmm8, xmm8, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15);
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 128), Assembler::AVX_512bit, r15 /*rscratch*/);//linc4
ev_add128(xmm9, xmm8, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15);
ev_add128(xmm10, xmm9, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15);
ev_add128(xmm11, xmm10, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15);
ev_add128(xmm12, xmm11, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15);
ev_add128(xmm13, xmm12, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15);
ev_add128(xmm14, xmm13, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15);
ev_add128(xmm15, xmm14, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15);
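
Assuming the counter_mask_addr tables hold linc0 = {0, 1, 2, 3} and linc4 = {4, 4, 4, 4} per 128-bit lane (consistent with the comments above, though the table contents are outside this diff), the ladder leaves zmm8..zmm15 holding counter+0 through counter+31; a sketch of the net effect, low qwords only:

#include <cstdint>

// out[r][l]: low qword of counter lane l in register zmm(8+r).
static void counter_ladder_model(uint64_t base, uint64_t out[8][4]) {
  const uint64_t linc0[4] = {0, 1, 2, 3};  // assumed table contents
  uint64_t cur[4];
  for (int l = 0; l < 4; l++) cur[l] = base + linc0[l];  // zmm8
  for (int r = 0; r < 8; r++) {
    for (int l = 0; l < 4; l++) out[r][l] = cur[l];
    for (int l = 0; l < 4; l++) cur[l] += 4;             // linc4 step
  }
}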

// load linc32 mask in zmm register.linc32 increments counter by 32
evmovdquq(xmm19, ExternalAddress(StubRoutines::x86::counter_mask_addr() + 256), Assembler::AVX_512bit, r15);//Linc32
@@ -891,21 +908,21 @@ void MacroAssembler::aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
// This is followed by incrementing counter values in zmm8-zmm15.
// Since we will be processing 32 blocks at a time, the counter is incremented by 32.
roundEnc(xmm21, 7);
vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
ev_add128(xmm8, xmm8, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
roundEnc(xmm22, 7);
vpaddq(xmm9, xmm9, xmm19, Assembler::AVX_512bit);
ev_add128(xmm9, xmm9, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
roundEnc(xmm23, 7);
vpaddq(xmm10, xmm10, xmm19, Assembler::AVX_512bit);
ev_add128(xmm10, xmm10, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
roundEnc(xmm24, 7);
vpaddq(xmm11, xmm11, xmm19, Assembler::AVX_512bit);
ev_add128(xmm11, xmm11, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
roundEnc(xmm25, 7);
vpaddq(xmm12, xmm12, xmm19, Assembler::AVX_512bit);
ev_add128(xmm12, xmm12, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
roundEnc(xmm26, 7);
vpaddq(xmm13, xmm13, xmm19, Assembler::AVX_512bit);
ev_add128(xmm13, xmm13, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
roundEnc(xmm27, 7);
vpaddq(xmm14, xmm14, xmm19, Assembler::AVX_512bit);
ev_add128(xmm14, xmm14, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
roundEnc(xmm28, 7);
vpaddq(xmm15, xmm15, xmm19, Assembler::AVX_512bit);
ev_add128(xmm15, xmm15, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
roundEnc(xmm29, 7);

cmpl(rounds, 52);
@@ -983,8 +1000,8 @@ void MacroAssembler::aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
vpshufb(xmm3, xmm11, xmm16, Assembler::AVX_512bit);
evpxorq(xmm3, xmm3, xmm20, Assembler::AVX_512bit);
// Increment counter values by 16
vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
vpaddq(xmm9, xmm9, xmm19, Assembler::AVX_512bit);
ev_add128(xmm8, xmm8, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
ev_add128(xmm9, xmm9, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
// AES encode rounds
roundEnc(xmm21, 3);
roundEnc(xmm22, 3);
@@ -1051,7 +1068,7 @@ void MacroAssembler::aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
vpshufb(xmm1, xmm9, xmm16, Assembler::AVX_512bit);
evpxorq(xmm1, xmm1, xmm20, Assembler::AVX_512bit);
// increment counter by 8
vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
ev_add128(xmm8, xmm8, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
// AES encode
roundEnc(xmm21, 1);
roundEnc(xmm22, 1);
@@ -1109,7 +1126,7 @@ void MacroAssembler::aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
vpshufb(xmm0, xmm8, xmm16, Assembler::AVX_512bit);
evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_512bit);
// Increment counter
vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_512bit);
ev_add128(xmm8, xmm8, xmm19, Assembler::AVX_512bit, /*ktmp*/k1, r15 /*rscratch*/);
vaesenc(xmm0, xmm0, xmm21, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm22, Assembler::AVX_512bit);
vaesenc(xmm0, xmm0, xmm23, Assembler::AVX_512bit);
@@ -1159,7 +1176,7 @@ void MacroAssembler::aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
evpxorq(xmm0, xmm0, xmm20, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm21, Assembler::AVX_128bit);
// Increment counter by 1
vpaddq(xmm8, xmm8, xmm19, Assembler::AVX_128bit);
ev_add128(xmm8, xmm8, xmm19, Assembler::AVX_128bit, /*ktmp*/k1, r15 /*rscratch*/);
vaesenc(xmm0, xmm0, xmm22, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm23, Assembler::AVX_128bit);
vaesenc(xmm0, xmm0, xmm24, Assembler::AVX_128bit);
15 changes: 14 additions & 1 deletion src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
@@ -4424,7 +4424,19 @@ class StubGenerator: public StubCodeGenerator {
return start;
}

// Vector AES Counter implementation
// Vector AES Counter implementation

address counter_mask_ones_addr() {
__ align64();
StubCodeMark mark(this, "StubRoutines", "counter_mask_addr");
address start = __ pc();
for (int i = 0; i < 4; i ++) {
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0x0000000000000001, relocInfo::none);
}
return start;
}
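
The 64 bytes emitted above amount to the qword pattern below: a 1 in the high half of each 128-bit lane, which is precisely where ev_add128's left-shifted carry mask can select an increment.

#include <cstdint>

// Layout sketch of the table: {low, high} per 128-bit lane, four lanes.
static const uint64_t counter_mask_ones[8] = {0, 1, 0, 1, 0, 1, 0, 1};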

address generate_counterMode_VectorAESCrypt() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
@@ -7641,6 +7653,7 @@ address generate_avx_ghash_processBlocks() {
if (UseAESCTRIntrinsics) {
if (VM_Version::supports_avx512_vaes() && VM_Version::supports_avx512bw() && VM_Version::supports_avx512vl()) {
StubRoutines::x86::_counter_mask_addr = counter_mask_addr();
StubRoutines::x86::_counter_mask_ones_addr = counter_mask_ones_addr();
StubRoutines::_counterMode_AESCrypt = generate_counterMode_VectorAESCrypt();
} else {
StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
1 change: 1 addition & 0 deletions src/hotspot/cpu/x86/stubRoutines_x86.cpp
@@ -71,6 +71,7 @@ address StubRoutines::x86::_avx2_shuffle_base64 = NULL;
address StubRoutines::x86::_avx2_input_mask_base64 = NULL;
address StubRoutines::x86::_avx2_lut_base64 = NULL;
address StubRoutines::x86::_counter_mask_addr = NULL;
address StubRoutines::x86::_counter_mask_ones_addr = NULL;
address StubRoutines::x86::_lookup_lo_base64 = NULL;
address StubRoutines::x86::_lookup_hi_base64 = NULL;
address StubRoutines::x86::_lookup_lo_base64url = NULL;
2 changes: 2 additions & 0 deletions src/hotspot/cpu/x86/stubRoutines_x86.hpp
@@ -184,6 +184,7 @@ class x86 {
// byte flip mask for sha512
static address _pshuffle_byte_flip_mask_addr_sha512;
static address _counter_mask_addr;
static address _counter_mask_ones_addr;
// Masks for base64
static address _encoding_table_base64;
static address _shuffle_base64;
@@ -343,6 +344,7 @@ class x86 {
static address base64_avx2_input_mask_addr() { return _avx2_input_mask_base64; }
static address base64_avx2_lut_addr() { return _avx2_lut_base64; }
static address counter_mask_addr() { return _counter_mask_addr; }
static address counter_mask_ones_addr() { return _counter_mask_ones_addr; }
static address base64_vbmi_lookup_lo_addr() { return _lookup_lo_base64; }
static address base64_vbmi_lookup_hi_addr() { return _lookup_hi_base64; }
static address base64_vbmi_lookup_lo_url_addr() { return _lookup_lo_base64url; }