Angel Pons has uploaded this change for review. ( https://review.coreboot.org/c/coreboot/+/35156 )
Change subject: [WIP] nb/intel/x4x/raminit: Do cosmetic fixes ......................................................................
[WIP] nb/intel/x4x/raminit: Do cosmetic fixes
This is mostly line reflowing to make use of the increased line length limit of 96 characters.
Ideally, this should be tested to ensure behavior was not modified.
Change-Id: Ie04ec4e493fc2c45e25521869c2f4e5b5a8d26cc
Signed-off-by: Angel Pons <th3fanbus@gmail.com>
---
M src/northbridge/intel/x4x/dq_dqs.c
M src/northbridge/intel/x4x/raminit.c
M src/northbridge/intel/x4x/raminit_ddr23.c
M src/northbridge/intel/x4x/rcven.c
4 files changed, 258 insertions(+), 449 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/56/35156/1
diff --git a/src/northbridge/intel/x4x/dq_dqs.c b/src/northbridge/intel/x4x/dq_dqs.c index ed372b5..f671f62 100644 --- a/src/northbridge/intel/x4x/dq_dqs.c +++ b/src/northbridge/intel/x4x/dq_dqs.c @@ -23,8 +23,7 @@ #include "x4x.h" #include "iomap.h"
-static void print_dll_setting(const struct dll_setting *dll_setting, - u8 default_verbose) +static void print_dll_setting(const struct dll_setting *dll_setting, u8 default_verbose) { u8 debug_level = default_verbose ? BIOS_DEBUG : RAM_DEBUG;
@@ -71,15 +70,13 @@ if (dq_dqs_setting->tap < limit.tap0) { dq_dqs_setting->db_en = 1; dq_dqs_setting->db_sel = 1; - } else if ((dq_dqs_setting->tap == limit.tap0) - && (dq_dqs_setting->pi < limit.pi0)) { + } else if ((dq_dqs_setting->tap == limit.tap0) && (dq_dqs_setting->pi < limit.pi0)) { dq_dqs_setting->db_en = 1; dq_dqs_setting->db_sel = 1; } else if (dq_dqs_setting->tap < limit.tap1) { dq_dqs_setting->db_en = 0; dq_dqs_setting->db_sel = 0; - } else if ((dq_dqs_setting->tap == limit.tap1) - && (dq_dqs_setting->pi < limit.pi1)) { + } else if ((dq_dqs_setting->tap == limit.tap1) && (dq_dqs_setting->pi < limit.pi1)) { dq_dqs_setting->db_en = 0; dq_dqs_setting->db_sel = 0; } else { @@ -90,11 +87,9 @@
const static u8 max_tap[3] = {12, 10, 13};
-static int increment_dq_dqs(const struct sysinfo *s, - struct dll_setting *dq_dqs_setting) +static int increment_dq_dqs(const struct sysinfo *s, struct dll_setting *dq_dqs_setting) { - u8 max_tap_val = max_tap[s->selected_timings.mem_clk - - MEM_CLOCK_800MHz]; + u8 max_tap_val = max_tap[s->selected_timings.mem_clk - MEM_CLOCK_800MHz];
if (dq_dqs_setting->pi < 6) { dq_dqs_setting->pi += 1; @@ -117,11 +112,9 @@ return CB_SUCCESS; }
-static int decrement_dq_dqs(const struct sysinfo *s, - struct dll_setting *dq_dqs_setting) +static int decrement_dq_dqs(const struct sysinfo *s, struct dll_setting *dq_dqs_setting) { - u8 max_tap_val = max_tap[s->selected_timings.mem_clk - - MEM_CLOCK_800MHz]; + u8 max_tap_val = max_tap[s->selected_timings.mem_clk - MEM_CLOCK_800MHz];
if (dq_dqs_setting->pi > 0) { dq_dqs_setting->pi -= 1; @@ -175,8 +168,7 @@ FAILING = 1 };
-static u8 test_dq_aligned(const struct sysinfo *s, - const u8 channel) +static u8 test_dq_aligned(const struct sysinfo *s, const u8 channel) { u32 address; int rank, lane; @@ -190,17 +182,14 @@ for (count1 = 0; count1 < WT_PATTERN_SIZE; count1++) { if ((count1 % 16) == 0) MCHBAR32(0xf90) = 1; - const u32 pattern = - write_training_schedule[count1]; + const u32 pattern = write_training_schedule[count1]; write32((u32 *)address + 8 * count1, pattern); - write32((u32 *)address + 8 * count1 + 4, - pattern); + write32((u32 *)address + 8 * count1 + 4, pattern); }
const u32 good = write_training_schedule[count]; write32(&data[0], read32((u32 *)address + 8 * count)); - write32(&data[4], - read32((u32 *)address + 8 * count + 4)); + write32(&data[4], read32((u32 *)address + 8 * count + 4)); FOR_EACH_BYTELANE(lane) { u8 expected = (good >> ((lane % 4) * 8)) & 0xff; if (data[lane] != expected) @@ -217,13 +206,11 @@ * This function finds either failing or succeeding writes by increasing DQ. * When it has found a failing or succeeding setting it will increase DQ * another 10 times to make sure the result is consistent. - * This is probably done because lanes cannot be trained independent from - * each other. + * This is probably done because lanes cannot be trained independent from each other. */ static int find_dq_limit(const struct sysinfo *s, const u8 channel, struct dll_setting dq_setting[TOTAL_BYTELANES], - u8 dq_lim[TOTAL_BYTELANES], - const enum training_modes expected_result) + u8 dq_lim[TOTAL_BYTELANES], const enum training_modes expected_result) { int status = CB_SUCCESS; int lane; @@ -251,10 +238,10 @@ succes_mask &= ~(1 << lane); } if (status == CB_ERR) { - printk(BIOS_CRIT, "Could not find a case of %s " - "writes on CH%d, lane %d\n", - expected_result == FAILING ? "failing" - : "succeeding", channel, lane); + printk(BIOS_CRIT, + "Could not find a case of %s writes on CH%d, lane %d\n", + expected_result == FAILING ? "failing" : "succeeding", + channel, lane); return CB_ERR; } } @@ -267,7 +254,7 @@ * the DQ and the DQS signal. * The training works this way: * - start from the DQS delay values (DQ is always later than DQS) - * - increment the DQ delay until a succeeding write is found on all bytelayes, + * - increment the DQ delay until a succeeding write is found on all bytelanes, * on all ranks on a channel and save these values * - again increment the DQ delay until write start to fail on all bytelanes and * save that value @@ -297,19 +284,15 @@ /* Start from DQS settings */ memcpy(dq_setting, s->dqs_settings[channel], sizeof(dq_setting));
- if (find_dq_limit(s, channel, dq_setting, dq_lower, - SUCCEEDING)) { - printk(BIOS_CRIT, - "Could not find working lower limit DQ setting\n"); + if (find_dq_limit(s, channel, dq_setting, dq_lower, SUCCEEDING)) { + printk(BIOS_CRIT, "Could not find working lower limit DQ setting\n"); return CB_ERR; }
memcpy(dq_upper, dq_lower, sizeof(dq_lower));
- if (find_dq_limit(s, channel, dq_setting, dq_upper, - FAILING)) { - printk(BIOS_WARNING, - "Could not find failing upper limit DQ setting\n"); + if (find_dq_limit(s, channel, dq_setting, dq_upper, FAILING)) { + printk(BIOS_WARNING, "Could not find failing upper limit DQ setting\n"); return CB_ERR; }
@@ -318,15 +301,14 @@ dq_upper[lane] -= CONSISTENCY - 1; u8 dq_center = (dq_upper[lane] + dq_lower[lane]) / 2;
- printk(RAM_DEBUG, "Centered value for DQ DLL:" - " ch%d, lane %d, #steps = %d\n", + printk(RAM_DEBUG, + "Centered value for DQ DLL: ch%d, lane %d, #steps = %d\n", channel, lane, dq_center); for (i = 0; i < dq_center; i++) { /* Should never happen */ if (increment_dq_dqs(s, &s->dq_settings[channel][lane]) == CB_ERR) - printk(BIOS_ERR, - "Huh? write training overflowed!!\n"); + printk(BIOS_ERR, "Huh? write training overflowed!!\n"); } }
@@ -419,13 +401,10 @@ }
if (expected_result == SUCCEEDING) { - printk(BIOS_CRIT, - "Could not find RT DQS setting\n"); + printk(BIOS_CRIT, "Could not find RT DQS setting\n"); return CB_ERR; } else { - printk(RAM_DEBUG, - "Read succeeded over all DQS" - " settings, continuing\n"); + printk(RAM_DEBUG, "Read succeeded over all DQS settings, continuing\n"); return CB_SUCCESS; } } @@ -437,18 +416,17 @@ * The training works this way: * - start from the lowest possible delay (0) on all bytelanes * - increment the DQS rx delays until a succeeding write is found on all - * bytelayes, on all ranks on a channel and save these values + * bytelanes, on all ranks on a channel and save these values * - again increment the DQS rx delay until write start to fail on all bytelanes * and save that value * - use the mean between the saved succeeding and failing value * - note0: bytelanes cannot be trained independently, so the delays need to be * adjusted and tested for all of them at the same time * - note1: At this stage all ranks effectively use the rank0's rt_dqs settings, - * but later on their respective settings are used (TODO where is the - * 'switch' register??). So programming the results for all ranks at the end - * of the training. Programming on all ranks instead of all populated ranks, - * seems to be required, most likely because the signals can't really be generated - * separately. + * but later on their respective settings are used (TODO where is the 'switch' register??) + * So programming the results for all ranks at the end of the training. Programming on all + * ranks instead of all populated ranks, seems to be required, most likely because the + * signals can't really be generated separately. */ int do_read_training(struct sysinfo *s) { @@ -465,8 +443,7 @@
for (loop = 0; loop < RT_LOOPS; loop++) { FOR_EACH_POPULATED_CHANNEL(s->dimms, channel) { - printk(RAM_DEBUG, "Doing DQS read training on CH%d\n", - channel); + printk(RAM_DEBUG, "Doing DQS read training on CH%d\n", channel);
/* Write pattern to strobe address */ FOR_EACH_POPULATED_RANK_IN_CHANNEL(s->dimms, channel, rank) { @@ -480,8 +457,7 @@
memset(dqs_lower, 0, sizeof(dqs_lower)); memset(&dqs_setting, 0, sizeof(dqs_setting)); - if (rt_find_dqs_limit(s, channel, dqs_setting, dqs_lower, - SUCCEEDING)) { + if (rt_find_dqs_limit(s, channel, dqs_setting, dqs_lower, SUCCEEDING)) { printk(BIOS_CRIT, "Could not find working lower limit DQS setting\n"); return CB_ERR; @@ -490,8 +466,7 @@ FOR_EACH_BYTELANE(lane) dqs_upper[lane] = dqs_lower[lane];
- if (rt_find_dqs_limit(s, channel, dqs_setting, dqs_upper, - FAILING)) { + if (rt_find_dqs_limit(s, channel, dqs_setting, dqs_upper, FAILING)) { printk(BIOS_CRIT, "Could not find failing upper limit DQ setting\n"); return CB_ERR; @@ -513,19 +488,16 @@ FOR_EACH_BYTELANE(lane) { saved_dqs_center[channel][lane] /= RT_LOOPS; while (saved_dqs_center[channel][lane]--) { - if(rt_increment_dqs(&s->rt_dqs[channel][lane]) - == CB_ERR) + if (rt_increment_dqs(&s->rt_dqs[channel][lane]) == CB_ERR) /* Should never happen */ - printk(BIOS_ERR, - "Huh? read training overflowed!!\n"); + printk(BIOS_ERR, "Huh? read training overflowed!!\n"); } - /* Later on separate settings for each rank are used so program + /* Later on, separate settings for each rank are used, so program all of them */ FOR_EACH_RANK_IN_CHANNEL(rank) - rt_set_dqs(channel, lane, rank, - &s->rt_dqs[channel][lane]); - printk(BIOS_DEBUG, "\tlane%d: %d.%d\n", - lane, s->rt_dqs[channel][lane].tap, + rt_set_dqs(channel, lane, rank, &s->rt_dqs[channel][lane]); + printk(BIOS_DEBUG, "\tlane%d: %d.%d\n", lane, + s->rt_dqs[channel][lane].tap, s->rt_dqs[channel][lane].pi); } } @@ -641,8 +613,8 @@
#define N_SAMPLES 5
-static void sample_dq(const struct sysinfo *s, u8 channel, u8 rank, - u8 high_found[8]) { +static void sample_dq(const struct sysinfo *s, u8 channel, u8 rank, u8 high_found[8]) +{ u32 address = test_address(channel, rank); int samples, lane;
@@ -652,8 +624,7 @@ write32((u32 *)address + 4, 0x12341234); udelay(5); FOR_EACH_BYTELANE(lane) { - u8 dq_high = (MCHBAR8(0x561 + 0x400 * channel - + (lane * 4)) >> 7) & 1; + u8 dq_high = (MCHBAR8(0x561 + 0x400 * channel + (lane * 4)) >> 7) & 1; high_found[lane] += dq_high; } } @@ -680,14 +651,12 @@ if (bytelane_ok & (1 << lane)) continue;
- printk(RAM_SPEW, "%d, %d, %02d, %d," - " lane%d sample: %d\n", + printk(RAM_SPEW, "%d, %d, %02d, %d, lane%d sample: %d\n", dqs_setting[lane].coarse, dqs_setting[lane].clk_delay, dqs_setting[lane].tap, dqs_setting[lane].pi, - lane, - dq_sample[lane]); + lane, dq_sample[lane]);
if (dq_sample[lane] > 0) { if (decrement_dq_dqs(s, &dqs_setting[lane])) { @@ -723,8 +692,7 @@ dqs_setting[lane].clk_delay, dqs_setting[lane].tap, dqs_setting[lane].pi, - lane, - dq_sample[lane]); + lane, dq_sample[lane]);
if (dq_sample[lane] == N_SAMPLES) { bytelane_ok |= (1 << lane); @@ -760,19 +728,18 @@
/* * DDR3 uses flyby topology where the clock signal takes a different path - * than the data signal, to allow for better signal intergrity. - * Therefore the delay on the data signals needs to account for this. - * This is done by by sampleling the the DQS write (tx) signal back over - * the DQ signal and looking for delay values where the sample transitions - * from high to low. - * Here the following is done: + * than the data signal, to allow for better signal integrity. + * Therefore, the delay on the data signals needs to account for this. + * This is done by by sampling the the DQS write (tx) signal back over the DQ signal + * and looking for delay values where the sample transitions from high to low. + * Here, the following is done: * - enable write levelling on the first populated rank * - disable output on other populated ranks * - start from safe DQS (tx) delays (other transitions can be * found at different starting values but are generally bad) * - loop0: decrease DQS (tx) delays until low is sampled, * loop1: increase DQS (tx) delays until high is sampled, - * That way we are sure to hit a low-high transition + * That way, we are sure to hit a low-high transition * - put all ranks in normal mode of operation again * - note: All ranks need to be leveled together */ @@ -782,7 +749,7 @@ u8 config, rank0, rank1, lane; struct dll_setting dq_setting;
- u8 chanconfig_lut[16]={0, 6, 4, 6, 7, 3, 1, 3, 5, 2, 0, 2, 7, 3, 1, 3}; + u8 chanconfig_lut[16] = {0, 6, 4, 6, 7, 3, 1, 3, 5, 2, 0, 2, 7, 3, 1, 3};
u8 odt_force[8][4] = { /* [Config][leveling rank] */ {0x5, 0x6, 0x5, 0x9}, @@ -801,50 +768,38 @@ printk(BIOS_DEBUG, "\tCH%d\n", ch); config = chanconfig_lut[s->dimm_config[ch]];
- MCHBAR8(0x5d8 + 0x400 * ch) = - MCHBAR8(0x5d8 + 0x400 * ch) & ~0x0e; + MCHBAR8(0x5d8 + 0x400 * ch) = MCHBAR8(0x5d8 + 0x400 * ch) & ~0x0e; MCHBAR16(0x5c4 + 0x400 * ch) = (MCHBAR16(0x5c4 + 0x400 * ch) & ~0x3fff) | 0x3fff; - MCHBAR8(0x265 + 0x400 * ch) = - MCHBAR8(0x265 + 0x400 * ch) & ~0x1f; + MCHBAR8(0x265 + 0x400 * ch) = MCHBAR8(0x265 + 0x400 * ch) & ~0x1f; /* find the first populated rank */ FOR_EACH_POPULATED_RANK_IN_CHANNEL(s->dimms, ch, rank0) break;
- /* Enable WL for the first populated rank and disable output - for others */ + /* Enable WL for the first populated rank and disable output for others */ FOR_EACH_POPULATED_RANK_IN_CHANNEL(s->dimms, ch, rank1) set_rank_write_level(s, ch, config, rank1, rank0, 1);
- MCHBAR8(0x298 + 2 + 0x400 * ch) = - (MCHBAR8(0x298 + 2 + 0x400 * ch) & ~0x0f) - | odt_force[config][rank0]; - MCHBAR8(0x271 + 0x400 * ch) = (MCHBAR8(0x271 + 0x400 * ch) - & ~0x7e) | 0x4e; - MCHBAR8(0x5d9 + 0x400 * ch) = - (MCHBAR8(0x5d9 + 0x400 * ch) & ~0x04) | 0x04; - MCHBAR32(0x1a0) = (MCHBAR32(0x1a0) & ~0x07ffffff) - | 0x00014000; + MCHBAR8(0x298 + 2 + 0x400 * ch) = (MCHBAR8(0x298 + 2 + 0x400 * ch) & ~0x0f) + | odt_force[config][rank0]; + + MCHBAR8(0x271 + 0x400 * ch) = (MCHBAR8(0x271 + 0x400 * ch) & ~0x7e) | 0x4e; + MCHBAR8(0x5d9 + 0x400 * ch) = (MCHBAR8(0x5d9 + 0x400 * ch) & ~0x04) | 0x04; + MCHBAR32(0x1a0) = (MCHBAR32(0x1a0) & ~0x07ffffff) | 0x00014000;
if (increment_to_dqs_edge(s, ch, rank0)) die("Write Leveling failed!");
- MCHBAR8(0x298 + 2 + 0x400 * ch) = - MCHBAR8(0x298 + 2 + 0x400 * ch) & ~0x0f; - MCHBAR8(0x271 + 0x400 * ch) = - (MCHBAR8(0x271 + 0x400 * ch) & ~0x7e) - | 0x0e; - MCHBAR8(0x5d9 + 0x400 * ch) = - (MCHBAR8(0x5d9 + 0x400 * ch) & ~0x04); - MCHBAR32(0x1a0) = (MCHBAR32(0x1a0) - & ~0x07ffffff) | 0x00555801; + MCHBAR8(0x298 + 2 + 0x400 * ch) = MCHBAR8(0x298 + 2 + 0x400 * ch) & ~0x0f; + MCHBAR8(0x271 + 0x400 * ch) = (MCHBAR8(0x271 + 0x400 * ch) & ~0x7e) | 0x0e; + MCHBAR8(0x5d9 + 0x400 * ch) = (MCHBAR8(0x5d9 + 0x400 * ch) & ~0x04); + MCHBAR32(0x1a0) = (MCHBAR32(0x1a0) & ~0x07ffffff) | 0x00555801;
/* Disable WL on the trained rank */ set_rank_write_level(s, ch, config, rank0, rank0, 0); send_jedec_cmd(s, rank0, ch, NORMALOP_CMD, 1 << 12);
- MCHBAR8(0x5d8 + 0x400 * ch) = (MCHBAR8(0x5d8 + 0x400 * ch) - & ~0x0e) | 0x0e; + MCHBAR8(0x5d8 + 0x400 * ch) = (MCHBAR8(0x5d8 + 0x400 * ch) & ~0x0e) | 0x0e; MCHBAR16(0x5c4 + 0x400 * ch) = (MCHBAR16(0x5c4 + 0x400 * ch) & ~0x3fff) | 0x1807; MCHBAR8(0x265 + 0x400 * ch) = MCHBAR8(0x265 + 0x400 * ch) & ~0x1f; @@ -856,7 +811,7 @@
MCHBAR8(0x5dc) = (MCHBAR8(0x5dc) & ~0x80) | 0x80;
- /* Increment DQ (rx) dll setting by a standard amount past DQS, + /* Increment DQ (rx) dll setting by a standard amount past DQS. This is further trained in write training. */ switch (s->selected_timings.mem_clk) { default: diff --git a/src/northbridge/intel/x4x/raminit.c b/src/northbridge/intel/x4x/raminit.c index 8013af9..32f0897 100644 --- a/src/northbridge/intel/x4x/raminit.c +++ b/src/northbridge/intel/x4x/raminit.c @@ -59,8 +59,7 @@ return spd_ddr3_calc_unique_crc(raw_spd, len); }
-static enum cb_err verify_spds(const u8 *spd_map, - const struct sysinfo *ctrl_cached) +static enum cb_err verify_spds(const u8 *spd_map, const struct sysinfo *ctrl_cached) { int i; u16 crc; @@ -69,11 +68,9 @@ if (!(spd_map[i])) continue; int len = smbus_read_byte(spd_map[i], 0); - if (len < 0 && ctrl_cached->dimms[i].card_type - == RAW_CARD_UNPOPULATED) + if (len < 0 && ctrl_cached->dimms[i].card_type == RAW_CARD_UNPOPULATED) continue; - if (len > 0 && ctrl_cached->dimms[i].card_type - == RAW_CARD_UNPOPULATED) + if (len > 0 && ctrl_cached->dimms[i].card_type == RAW_CARD_UNPOPULATED) return CB_ERR;
if (ctrl_cached->spd_type == DDR2) @@ -104,8 +101,7 @@
#define CTRL_MIN_TCLK_DDR2 TCK_400MHZ
-static void select_cas_dramfreq_ddr2(struct sysinfo *s, - const struct abs_timings *saved_timings) +static void select_cas_dramfreq_ddr2(struct sysinfo *s, const struct abs_timings *saved_timings) { u8 try_cas; /* Currently only these CAS are supported */ @@ -178,8 +174,7 @@
if (!(decoded_dimm.width & (0x08 | 0x10))) {
- printk(BIOS_ERR, - "DIMM%d Unsupported width: x%d. Disabling dimm\n", + printk(BIOS_ERR, "DIMM%d Unsupported width: x%d. Disabling dimm\n", dimm_idx, s->dimms[dimm_idx].width); return CB_ERR; } @@ -215,28 +210,20 @@
saved_timings->cas_supported &= decoded_dimm.cas_supported;
- saved_timings->min_tRAS = - MAX(saved_timings->min_tRAS, decoded_dimm.tRAS); - saved_timings->min_tRP = - MAX(saved_timings->min_tRP, decoded_dimm.tRP); - saved_timings->min_tRCD = - MAX(saved_timings->min_tRCD, decoded_dimm.tRCD); - saved_timings->min_tWR = - MAX(saved_timings->min_tWR, decoded_dimm.tWR); - saved_timings->min_tRFC = - MAX(saved_timings->min_tRFC, decoded_dimm.tRFC); - saved_timings->min_tWTR = - MAX(saved_timings->min_tWTR, decoded_dimm.tWTR); - saved_timings->min_tRRD = - MAX(saved_timings->min_tRRD, decoded_dimm.tRRD); - saved_timings->min_tRTP = - MAX(saved_timings->min_tRTP, decoded_dimm.tRTP); + saved_timings->min_tRAS = MAX(saved_timings->min_tRAS, decoded_dimm.tRAS); + saved_timings->min_tRP = MAX(saved_timings->min_tRP, decoded_dimm.tRP); + saved_timings->min_tRCD = MAX(saved_timings->min_tRCD, decoded_dimm.tRCD); + saved_timings->min_tWR = MAX(saved_timings->min_tWR, decoded_dimm.tWR); + saved_timings->min_tRFC = MAX(saved_timings->min_tRFC, decoded_dimm.tRFC); + saved_timings->min_tWTR = MAX(saved_timings->min_tWTR, decoded_dimm.tWTR); + saved_timings->min_tRRD = MAX(saved_timings->min_tRRD, decoded_dimm.tRRD); + saved_timings->min_tRTP = MAX(saved_timings->min_tRTP, decoded_dimm.tRTP); + for (i = 0; i < 8; i++) { if (!(saved_timings->cas_supported & (1 << i))) saved_timings->min_tCLK_cas[i] = 0; else - saved_timings->min_tCLK_cas[i] = - MAX(saved_timings->min_tCLK_cas[i], + saved_timings->min_tCLK_cas[i] = MAX(saved_timings->min_tCLK_cas[i], decoded_dimm.cycle_time[i]); }
@@ -257,12 +244,11 @@ *tCLK = 0; }
-static void select_cas_dramfreq_ddr3(struct sysinfo *s, - struct abs_timings *saved_timings) +static void select_cas_dramfreq_ddr3(struct sysinfo *s, struct abs_timings *saved_timings) { /* * various constraints must be fulfilled: - * CAS * tCK < 20ns == 160MTB + * CAS * tCK < 20ns == 160MTB * tCK_max >= tCK >= tCK_min * CAS >= roundup(tAA_min/tCK) * CAS supported @@ -304,9 +290,8 @@
min_tCLK = MAX(min_tCLK, saved_timings->min_tclk); if (min_tCLK == 0) { - printk(BIOS_ERR, "DRAM frequency is under lowest supported " - "frequency (400 MHz). Increasing to 400 MHz " - "as last resort"); + printk(BIOS_ERR, "DRAM frequency is lower than supported frequency (400 MHz)." + " Increasing to 400 MHz as last resort.\n"); min_tCLK = TCK_400MHZ; }
@@ -317,10 +302,7 @@ try_CAS = DIV_ROUND_UP(saved_timings->min_tAA, min_tCLK); printk(BIOS_SPEW, "Trying CAS %u, tCK %u.\n", try_CAS, min_tCLK); for (; try_CAS <= DDR3_MAX_CAS; try_CAS++) { - /* - * cas_supported is encoded like the SPD which starts - * at CAS=4. - */ + /* cas_supported is encoded like the SPD which starts at CAS=4. */ if ((saved_timings->cas_supported << 4) & (1 << try_CAS)) break; } @@ -331,9 +313,8 @@ break; } /* - * If no valid tCLK / CAS pair could be found for a tCLK - * increase it after which it gets normalised. This means - * that a lower frequency gets tried. + * If no valid tCLK / CAS pair could be found for a tCLK, increase it after + * which it gets normalised. This means that a lower frequency gets tried. */ min_tCLK++; } @@ -354,20 +335,22 @@ } }
-/* With DDR3 and 533MHz mem clock and an enabled internal gfx device the display - is not usable in non stacked mode, so select stacked mode accordingly */ +/* With DDR3 and 533MHz mem clock and an enabled internal gfx device, the display + is not usable in non-stacked mode, so select stacked mode accordingly */ static void workaround_stacked_mode(struct sysinfo *s) { u32 deven; /* Only a problem on DDR3 */ if (s->spd_type == DDR2) return; + /* Does not matter if only one channel is populated */ - if (!CHANNEL_IS_POPULATED(s->dimms, 0) - || !CHANNEL_IS_POPULATED(s->dimms, 1)) + if (!CHANNEL_IS_POPULATED(s->dimms, 0) || !CHANNEL_IS_POPULATED(s->dimms, 1)) return; + if (s->selected_timings.mem_clk != MEM_CLOCK_1066MHz) return; + /* IGD0EN gets disabled if not present before this code runs */ deven = pci_read_config32(PCI_DEV(0, 0, 0), D0F0_DEVEN); if (deven & IGD0EN) @@ -394,9 +377,9 @@ s->dimms[dimm_idx].width = (decoded_dimm.width >> 3) - 1; /* * This boils down to: - * "Except for the x16 configuration, all DDR3 devices have a - * 1KB page size. For the x16 configuration, the page size is 2KB - * for all densities except the 256Mb device, which has a 1KB page size." + * "Except for the x16 configuration, all DDR3 devices have a 1KB page size. + * For the x16 configuration, the page size is 2KB for all densities except + * the 256Mb device, which has a 1KB page size." * Micron, 'TN-47-16 Designing for High-Density DDR2 Memory' */ s->dimms[dimm_idx].page_size = decoded_dimm.width * @@ -408,28 +391,19 @@ s->dimms[dimm_idx].rows = decoded_dimm.row_bits; s->dimms[dimm_idx].cols = decoded_dimm.col_bits;
- saved_timings->min_tRAS = - MAX(saved_timings->min_tRAS, decoded_dimm.tRAS); - saved_timings->min_tRP = - MAX(saved_timings->min_tRP, decoded_dimm.tRP); - saved_timings->min_tRCD = - MAX(saved_timings->min_tRCD, decoded_dimm.tRCD); - saved_timings->min_tWR = - MAX(saved_timings->min_tWR, decoded_dimm.tWR); - saved_timings->min_tRFC = - MAX(saved_timings->min_tRFC, decoded_dimm.tRFC); - saved_timings->min_tWTR = - MAX(saved_timings->min_tWTR, decoded_dimm.tWTR); - saved_timings->min_tRRD = - MAX(saved_timings->min_tRRD, decoded_dimm.tRRD); - saved_timings->min_tRTP = - MAX(saved_timings->min_tRTP, decoded_dimm.tRTP); - saved_timings->min_tAA = - MAX(saved_timings->min_tAA, decoded_dimm.tAA); + saved_timings->min_tRAS = MAX(saved_timings->min_tRAS, decoded_dimm.tRAS); + saved_timings->min_tRP = MAX(saved_timings->min_tRP, decoded_dimm.tRP); + saved_timings->min_tRCD = MAX(saved_timings->min_tRCD, decoded_dimm.tRCD); + saved_timings->min_tWR = MAX(saved_timings->min_tWR, decoded_dimm.tWR); + saved_timings->min_tRFC = MAX(saved_timings->min_tRFC, decoded_dimm.tRFC); + saved_timings->min_tWTR = MAX(saved_timings->min_tWTR, decoded_dimm.tWTR); + saved_timings->min_tRRD = MAX(saved_timings->min_tRRD, decoded_dimm.tRRD); + saved_timings->min_tRTP = MAX(saved_timings->min_tRTP, decoded_dimm.tRTP); + saved_timings->min_tAA = MAX(saved_timings->min_tAA, decoded_dimm.tAA); + saved_timings->cas_supported &= decoded_dimm.cas_supported;
- s->dimms[dimm_idx].spd_crc = spd_ddr3_calc_unique_crc(raw_spd, - raw_spd[0]); + s->dimms[dimm_idx].spd_crc = spd_ddr3_calc_unique_crc(raw_spd, raw_spd[0]);
s->dimms[dimm_idx].mirrored = decoded_dimm.flags.pins_mirrored;
@@ -437,33 +411,22 @@ }
-static void select_discrete_timings(struct sysinfo *s, - const struct abs_timings *timings) +static void select_discrete_timings(struct sysinfo *s, const struct abs_timings *timings) { - s->selected_timings.tRAS = DIV_ROUND_UP(timings->min_tRAS, - s->selected_timings.tclk); - s->selected_timings.tRP = DIV_ROUND_UP(timings->min_tRP, - s->selected_timings.tclk); - s->selected_timings.tRCD = DIV_ROUND_UP(timings->min_tRCD, - s->selected_timings.tclk); - s->selected_timings.tWR = DIV_ROUND_UP(timings->min_tWR, - s->selected_timings.tclk); - s->selected_timings.tRFC = DIV_ROUND_UP(timings->min_tRFC, - s->selected_timings.tclk); - s->selected_timings.tWTR = DIV_ROUND_UP(timings->min_tWTR, - s->selected_timings.tclk); - s->selected_timings.tRRD = DIV_ROUND_UP(timings->min_tRRD, - s->selected_timings.tclk); - s->selected_timings.tRTP = DIV_ROUND_UP(timings->min_tRTP, - s->selected_timings.tclk); + s->selected_timings.tRAS = DIV_ROUND_UP(timings->min_tRAS, s->selected_timings.tclk); + s->selected_timings.tRP = DIV_ROUND_UP(timings->min_tRP, s->selected_timings.tclk); + s->selected_timings.tRCD = DIV_ROUND_UP(timings->min_tRCD, s->selected_timings.tclk); + s->selected_timings.tWR = DIV_ROUND_UP(timings->min_tWR, s->selected_timings.tclk); + s->selected_timings.tRFC = DIV_ROUND_UP(timings->min_tRFC, s->selected_timings.tclk); + s->selected_timings.tWTR = DIV_ROUND_UP(timings->min_tWTR, s->selected_timings.tclk); + s->selected_timings.tRRD = DIV_ROUND_UP(timings->min_tRRD, s->selected_timings.tclk); + s->selected_timings.tRTP = DIV_ROUND_UP(timings->min_tRTP, s->selected_timings.tclk); } static void print_selected_timings(struct sysinfo *s) { printk(BIOS_DEBUG, "Selected timings:\n"); - printk(BIOS_DEBUG, "\tFSB: %dMHz\n", - fsb_to_mhz(s->selected_timings.fsb_clk)); - printk(BIOS_DEBUG, "\tDDR: %dMHz\n", - ddr_to_mhz(s->selected_timings.mem_clk)); + printk(BIOS_DEBUG, "\tFSB: %dMHz\n", fsb_to_mhz(s->selected_timings.fsb_clk)); + printk(BIOS_DEBUG, "\tDDR: %dMHz\n", ddr_to_mhz(s->selected_timings.mem_clk));
printk(BIOS_DEBUG, "\tCAS: %d\n", s->selected_timings.CAS); printk(BIOS_DEBUG, "\ttRAS: %d\n", s->selected_timings.tRAS); @@ -541,16 +504,14 @@ if (s->spd_type == DDR2){ if (ddr2_save_dimminfo(i, raw_spd, &saved_timings, s)) { printk(BIOS_WARNING, - "Encountered problems with SPD, " - "skipping this DIMM.\n"); + "Encountered problems with SPD, skipping this DIMM.\n"); s->dimms[i].card_type = RAW_CARD_UNPOPULATED; continue; } } else { /* DDR3 */ if (ddr3_save_dimminfo(i, raw_spd, &saved_timings, s)) { printk(BIOS_WARNING, - "Encountered problems with SPD, " - "skipping this DIMM.\n"); + "Encountered problems with SPD, skipping this DIMM.\n"); /* something in decoded SPD was unsupported */ s->dimms[i].card_type = RAW_CARD_UNPOPULATED; continue; @@ -565,6 +526,7 @@ select_cas_dramfreq_ddr2(s, &saved_timings); else select_cas_dramfreq_ddr3(s, &saved_timings); + select_discrete_timings(s, &saved_timings); workaround_stacked_mode(s); } @@ -587,11 +549,9 @@ else die("Dual-rank x16 not supported\n"); } - s->dimm_config[chan] |= - dimm_config << (i % DIMMS_PER_CHANNEL) * 2; + s->dimm_config[chan] |= dimm_config << (i % DIMMS_PER_CHANNEL) * 2; } - printk(BIOS_DEBUG, " Config[CH%d] : %d\n", chan, - s->dimm_config[chan]); + printk(BIOS_DEBUG, " Config[CH%d] : %d\n", chan, s->dimm_config[chan]); }
} @@ -604,11 +564,9 @@ if (boot_path >= 1) { pmsts = MCHBAR32(PMSTS_MCHBAR); if (!(pmsts & 1)) - printk(BIOS_DEBUG, - "Channel 0 possibly not in self refresh\n"); + printk(BIOS_DEBUG, "Channel 0 possibly not in self refresh\n"); if (!(pmsts & 2)) - printk(BIOS_DEBUG, - "Channel 1 possibly not in self refresh\n"); + printk(BIOS_DEBUG, "Channel 1 possibly not in self refresh\n"); }
pmcon2 = pci_read_config8(PCI_DEV(0, 0x1f, 0), 0xa2); @@ -646,8 +604,7 @@
memset(&s, 0, sizeof(struct sysinfo));
- cache_not_found = mrc_cache_get_current(MRC_TRAINING_DATA, - MRC_CACHE_VERSION, &rdev); + cache_not_found = mrc_cache_get_current(MRC_TRAINING_DATA, MRC_CACHE_VERSION, &rdev);
if (cache_not_found || (region_device_sz(&rdev) < sizeof(s))) { if (boot_path == BOOT_PATH_RESUME) { @@ -665,8 +622,7 @@
/* verify MRC cache for fast boot */ if (boot_path != BOOT_PATH_RESUME && ctrl_cached) { - /* check SPD checksum to make sure the DIMMs haven't been - * replaced */ + /* check SPD checksum to make sure the DIMMs haven't been replaced */ fast_boot = verify_spds(spd_map, ctrl_cached) == CB_SUCCESS; if (!fast_boot) { printk(BIOS_DEBUG, "SPD checksums don't match," @@ -698,8 +654,7 @@
/* Detect dimms per channel */ reg8 = pci_read_config8(PCI_DEV(0, 0, 0), 0xe9); - printk(BIOS_DEBUG, "Dimms per channel: %d\n", - (reg8 & 0x10) ? 1 : 2); + printk(BIOS_DEBUG, "Dimms per channel: %d\n", (reg8 & 0x10) ? 1 : 2);
mchinfo_ddr2(&s);
@@ -720,8 +675,8 @@
cbmem_was_inited = !cbmem_recovery(s.boot_path == BOOT_PATH_RESUME); if (!fast_boot) - mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION, - &s, sizeof(s)); + mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION, &s, sizeof(s)); + if (s.boot_path == BOOT_PATH_RESUME && !cbmem_was_inited) { /* Failed S3 resume, reset to come up cleanly */ system_reset(); diff --git a/src/northbridge/intel/x4x/raminit_ddr23.c b/src/northbridge/intel/x4x/raminit_ddr23.c index 1e40b9c..d7152ef 100644 --- a/src/northbridge/intel/x4x/raminit_ddr23.c +++ b/src/northbridge/intel/x4x/raminit_ddr23.c @@ -345,8 +345,7 @@
for (rank = 0; rank < 4; rank++) { MCHBAR32_AND_OR(0x400 * ch + 0x5b4 + rank * 4, ~(0x201 << lane), - (setting->db_en << (9 + lane)) | - (setting->db_sel << lane)); + (setting->db_en << (9 + lane)) | (setting->db_sel << lane));
MCHBAR32_AND_OR(0x400*ch + 0x5c8 + rank * 4, ~(0x3 << (16 + lane * 2)), @@ -354,21 +353,18 @@
MCHBAR8(0x400*ch + 0x520 + lane * 4 + rank) = (MCHBAR8(0x400*ch + 0x520 + lane * 4) & ~0x7f) | - (setting->pi << 4) | - setting->tap; + (setting->pi << 4) | setting->tap; } }
void dqset(u8 ch, u8 lane, const struct dll_setting *setting) { int rank; - MCHBAR32_AND_OR(0x400 * ch + 0x5fc, ~(1 << (lane * 4)), - setting->coarse << (lane * 4)); + MCHBAR32_AND_OR(0x400 * ch + 0x5fc, ~(1 << (lane * 4)), setting->coarse << (lane * 4));
for (rank = 0; rank < 4; rank++) { MCHBAR32_AND_OR(0x400 * ch + 0x5a4 + rank * 4, ~(0x201 << lane), - (setting->db_en << (9 + lane)) | - (setting->db_sel << lane)); + (setting->db_en << (9 + lane)) | (setting->db_sel << lane));
MCHBAR32_AND_OR(0x400 * ch + 0x5c8 + rank * 4, ~(0x3 << (lane * 2)), setting->clk_delay << (2 * lane)); @@ -378,8 +374,7 @@ } }
-void rt_set_dqs(u8 channel, u8 lane, u8 rank, - struct rt_dqs_setting *dqs_setting) +void rt_set_dqs(u8 channel, u8 lane, u8 rank, struct rt_dqs_setting *dqs_setting) { u16 saved_tap = MCHBAR16(0x540 + 0x400 * channel + lane * 4); u16 saved_pi = MCHBAR16(0x542 + 0x400 * channel + lane * 4); @@ -519,29 +514,24 @@ /* tWL - x ?? */ MCHBAR8_AND_OR(0x400*i + 0x240, ~0xf0, 0 << 4); MCHBAR8_AND_OR(0x400*i + 0x240, ~0xf, adjusted_cas); - MCHBAR16_AND_OR(0x400*i + 0x265, ~0x3f00, - (adjusted_cas + 9) << 8); + MCHBAR16_AND_OR(0x400*i + 0x265, ~0x3f00, (adjusted_cas + 9) << 8);
reg16 = (s->selected_timings.tRAS << 11) | ((twl + 4 + s->selected_timings.tWR) << 6) | ((2 + MAX(s->selected_timings.tRTP, 2)) << 2) | 1; MCHBAR16(0x400*i + 0x250) = reg16;
- reg32 = (bankmod << 21) | - (s->selected_timings.tRRD << 17) | + reg32 = (bankmod << 21) | (s->selected_timings.tRRD << 17) | (s->selected_timings.tRP << 13) | - ((s->selected_timings.tRP + trpmod) << 9) | - s->selected_timings.tRFC; + ((s->selected_timings.tRP + trpmod) << 9) | s->selected_timings.tRFC; if (bankmod == 0) { reg8 = (MCHBAR8(0x400*i + 0x26f) >> 1) & 1; if (s->spd_type == DDR2) reg32 |= ddr2_x252_tab[s->selected_timings.mem_clk - - MEM_CLOCK_667MHz][reg8][pagemod] - << 22; + - MEM_CLOCK_667MHz][reg8][pagemod] << 22; else reg32 |= ddr3_x252_tab[s->selected_timings.mem_clk - - MEM_CLOCK_800MHz][reg8][pagemod] - << 22; + - MEM_CLOCK_800MHz][reg8][pagemod] << 22; } MCHBAR32(0x400*i + 0x252) = reg32;
@@ -557,9 +547,9 @@
MCHBAR16_AND_OR(0x400*i + 0x260, ~0x3fe, (s->spd_type == DDR2 ? 100 : 256) << 1); + MCHBAR8(0x400*i + 0x264) = 0xff; - MCHBAR8_AND_OR(0x400*i + 0x25d, ~0x3f, - s->selected_timings.tRAS); + MCHBAR8_AND_OR(0x400*i + 0x25d, ~0x3f, s->selected_timings.tRAS); MCHBAR16(0x400*i + 0x244) = 0x2310;
switch (s->selected_timings.mem_clk) { @@ -671,8 +661,7 @@ if (s->spd_type == DDR3) { MCHBAR8(0x114) = 0x42; reg16 = (512 - MAX(5, s->selected_timings.tRFC + 10000 - / ddr_to_ps[s->selected_timings.mem_clk])) - / 2; + / ddr_to_ps[s->selected_timings.mem_clk])) / 2; reg16 &= 0x1ff; reg32 = (reg16 << 22) | (0x80 << 14) | (0xa << 9); } @@ -686,8 +675,7 @@ u16 reg16 = 0; u32 reg32 = 0;
- const u8 rank2clken[8] = { 0x04, 0x01, 0x20, 0x08, 0x01, 0x04, - 0x08, 0x10 }; + const u8 rank2clken[8] = {0x04, 0x01, 0x20, 0x08, 0x01, 0x04, 0x08, 0x10};
MCHBAR16_AND_OR(0x180, ~0x7e06, 0xc04); MCHBAR16_AND_OR(0x182, ~0x3ff, 0xc8); @@ -729,8 +717,7 @@ udelay(1); // 533ns
// ME related - MCHBAR32_AND_OR(0x1a0, ~0x7ffffff, - s->spd_type == DDR2 ? 0x551803 : 0x555801); + MCHBAR32_AND_OR(0x1a0, ~0x7ffffff, s->spd_type == DDR2 ? 0x551803 : 0x555801);
MCHBAR16_AND(0x1b4, ~0x800); if (s->spd_type == DDR2) { @@ -761,34 +748,25 @@
if (s->spd_type == DDR2) { if (!CHANNEL_IS_POPULATED(s->dimms, i)) { - printk(BIOS_DEBUG, - "No dimms in channel %d\n", i); + printk(BIOS_DEBUG, "No dimms in channel %d\n", i); reg8 = 0x3f; } else if (ONLY_DIMMA_IS_POPULATED(s->dimms, i)) { - printk(BIOS_DEBUG, - "DimmA populated only in channel %d\n", - i); + printk(BIOS_DEBUG, "DimmA populated only in channel %d\n", i); reg8 = 0x38; } else if (ONLY_DIMMB_IS_POPULATED(s->dimms, i)) { - printk(BIOS_DEBUG, - "DimmB populated only in channel %d\n", - i); + printk(BIOS_DEBUG, "DimmB populated only in channel %d\n", i); reg8 = 0x7; } else if (BOTH_DIMMS_ARE_POPULATED(s->dimms, i)) { - printk(BIOS_DEBUG, - "Both dimms populated in channel %d\n", - i); + printk(BIOS_DEBUG, "Both dimms populated in channel %d\n", i); reg8 = 0; } else { die("Unhandled case\n"); } - MCHBAR32_AND_OR(0x400*i + 0x5a0, ~0x3f000000, - (u32)(reg8 << 24)); + MCHBAR32_AND_OR(0x400*i + 0x5a0, ~0x3f000000, (u32)(reg8 << 24));
} else { /* DDR3 */ FOR_EACH_POPULATED_RANK_IN_CHANNEL(s->dimms, i, r) { - MCHBAR8_AND(0x400 * i + 0x5a0 + 3, - ~rank2clken[r + i * 4]); + MCHBAR8_AND(0x400 * i + 0x5a0 + 3, ~rank2clken[r + i * 4]); } }
@@ -811,8 +789,7 @@ FOR_EACH_POPULATED_CHANNEL(s->dimms, i) { MCHBAR16_AND_OR(0x400*i + 0x5f0, ~0x3fc, 0x3fc); MCHBAR32_AND(0x400*i + 0x5fc, ~0xcccccccc); - MCHBAR8_AND_OR(0x400*i + 0x5d9, ~0xf0, - s->spd_type == DDR2 ? 0x70 : 0x60); + MCHBAR8_AND_OR(0x400*i + 0x5d9, ~0xf0, s->spd_type == DDR2 ? 0x70 : 0x60); MCHBAR16_AND_OR(0x400*i + 0x590, ~0xffff, s->spd_type == DDR2 ? 0x5555 : 0xa955); } @@ -971,52 +948,42 @@ FOR_EACH_POPULATED_CHANNEL_AND_BYTELANE(s->dimms, ch, lane) { switch (s->selected_timings.mem_clk) { case MEM_CLOCK_667MHz: - memcpy(s->dqs_settings[ch], - default_ddr2_667_dqs, + memcpy(s->dqs_settings[ch], default_ddr2_667_dqs, sizeof(s->dqs_settings[ch])); - memcpy(s->dq_settings[ch], - default_ddr2_667_dq, + memcpy(s->dq_settings[ch], default_ddr2_667_dq, sizeof(s->dq_settings[ch])); s->rt_dqs[ch][lane].tap = 7; s->rt_dqs[ch][lane].pi = 2; break; case MEM_CLOCK_800MHz: if (s->spd_type == DDR2) { - memcpy(s->dqs_settings[ch], - default_ddr2_800_dqs, + memcpy(s->dqs_settings[ch], default_ddr2_800_dqs, sizeof(s->dqs_settings[ch])); - memcpy(s->dq_settings[ch], - default_ddr2_800_dq, + memcpy(s->dq_settings[ch], default_ddr2_800_dq, sizeof(s->dq_settings[ch])); s->rt_dqs[ch][lane].tap = 7; s->rt_dqs[ch][lane].pi = 0; } else { /* DDR3 */ - memcpy(s->dqs_settings[ch], - default_ddr3_800_dqs[s->nmode - 1], + memcpy(s->dqs_settings[ch], default_ddr3_800_dqs[s->nmode - 1], sizeof(s->dqs_settings[ch])); - memcpy(s->dq_settings[ch], - default_ddr3_800_dq[s->nmode - 1], + memcpy(s->dq_settings[ch], default_ddr3_800_dq[s->nmode - 1], sizeof(s->dq_settings[ch])); s->rt_dqs[ch][lane].tap = 6; s->rt_dqs[ch][lane].pi = 3; } break; case MEM_CLOCK_1066MHz: - memcpy(s->dqs_settings[ch], - default_ddr3_1067_dqs[s->nmode - 1], + memcpy(s->dqs_settings[ch], default_ddr3_1067_dqs[s->nmode - 1], sizeof(s->dqs_settings[ch])); - memcpy(s->dq_settings[ch], - default_ddr3_1067_dq[s->nmode - 1], + memcpy(s->dq_settings[ch], default_ddr3_1067_dq[s->nmode - 1], sizeof(s->dq_settings[ch])); s->rt_dqs[ch][lane].tap = 5; s->rt_dqs[ch][lane].pi = 3; break; case MEM_CLOCK_1333MHz: - memcpy(s->dqs_settings[ch], - default_ddr3_1333_dqs[s->nmode - 1], + memcpy(s->dqs_settings[ch], default_ddr3_1333_dqs[s->nmode - 1], sizeof(s->dqs_settings[ch])); - memcpy(s->dq_settings[ch], - default_ddr3_1333_dq[s->nmode - 1], + memcpy(s->dq_settings[ch], default_ddr3_1333_dq[s->nmode - 1], sizeof(s->dq_settings[ch])); s->rt_dqs[ch][lane].tap = 7; s->rt_dqs[ch][lane].pi = 0; @@ -1040,8 +1007,7 @@ FOR_EACH_POPULATED_CHANNEL(s->dimms, ch) { FOR_EACH_BYTELANE(lane) { FOR_EACH_RANK_IN_CHANNEL(rank) { - rt_set_dqs(ch, lane, rank, - &s->rt_dqs[ch][lane]); + rt_set_dqs(ch, lane, rank, &s->rt_dqs[ch][lane]); } dqsset(ch, lane, &s->dqs_settings[ch][lane]); dqset(ch, lane, &s->dq_settings[ch][lane]); @@ -1052,20 +1018,20 @@ static void prog_rcomp(struct sysinfo *s) { u8 i, j, k, reg8; - const u32 ddr2_x32a[8] = { 0x04040404, 0x06050505, 0x09090807, 0x0D0C0B0A, - 0x04040404, 0x08070605, 0x0C0B0A09, 0x100F0E0D }; - const u16 ddr2_x378[6] = { 0, 0xAAAA, 0x7777, 0x7777, 0x7777, 0x7777 }; - const u32 ddr2_x382[6] = { 0, 0x02020202, 0x02020202, 0x02020202, 0x04030303, 0x04030303 }; - const u32 ddr2_x386[6] = { 0, 0x03020202, 0x03020202, 0x03020202, 0x05040404, 0x05040404 }; - const u32 ddr2_x38a[6] = { 0, 0x04040303, 0x04040303, 0x04040303, 0x07070605, 0x07070605 }; - const u32 ddr2_x38e[6] = { 0, 0x06060505, 0x06060505, 0x06060505, 0x09090808, 0x09090808 }; - const u32 ddr2_x392[6] = { 0, 0x02020202, 0x02020202, 0x02020202, 0x03030202, 
0x03030202 }; - const u32 ddr2_x396[6] = { 0, 0x03030202, 0x03030202, 0x03030202, 0x05040303, 0x05040303 }; - const u32 ddr2_x39a[6] = { 0, 0x04040403, 0x04040403, 0x04040403, 0x07070605, 0x07070605 }; - const u32 ddr2_x39e[6] = { 0, 0x06060505, 0x06060505, 0x06060505, 0x08080808, 0x08080808 }; + const u32 ddr2_x32a[8] = {0x04040404, 0x06050505, 0x09090807, 0x0D0C0B0A, + 0x04040404, 0x08070605, 0x0C0B0A09, 0x100F0E0D}; + const u16 ddr2_x378[6] = {0, 0xAAAA, 0x7777, 0x7777, 0x7777, 0x7777}; + const u32 ddr2_x382[6] = {0, 0x02020202, 0x02020202, 0x02020202, 0x04030303, 0x04030303}; + const u32 ddr2_x386[6] = {0, 0x03020202, 0x03020202, 0x03020202, 0x05040404, 0x05040404}; + const u32 ddr2_x38a[6] = {0, 0x04040303, 0x04040303, 0x04040303, 0x07070605, 0x07070605}; + const u32 ddr2_x38e[6] = {0, 0x06060505, 0x06060505, 0x06060505, 0x09090808, 0x09090808}; + const u32 ddr2_x392[6] = {0, 0x02020202, 0x02020202, 0x02020202, 0x03030202, 0x03030202}; + const u32 ddr2_x396[6] = {0, 0x03030202, 0x03030202, 0x03030202, 0x05040303, 0x05040303}; + const u32 ddr2_x39a[6] = {0, 0x04040403, 0x04040403, 0x04040403, 0x07070605, 0x07070605}; + const u32 ddr2_x39e[6] = {0, 0x06060505, 0x06060505, 0x06060505, 0x08080808, 0x08080808};
const u32 ddr3_x32a[8] = {0x06060606, 0x06060606, 0x0b090807, 0x12110f0d, - 0x06060606, 0x08070606, 0x0d0b0a09, 0x16161511}; + 0x06060606, 0x08070606, 0x0d0b0a09, 0x16161511}; const u16 ddr3_x378[6] = {0, 0xbbbb, 0x6666, 0x6666, 0x6666, 0x6666}; const u32 ddr3_x382[6] = {0, 0x05050505, 0x04040404, 0x04040404, 0x34343434, 0x34343434}; const u32 ddr3_x386[6] = {0, 0x05050505, 0x04040404, 0x04040404, 0x34343434, 0x34343434}; @@ -1080,8 +1046,8 @@ const u32 *x32a, *x382, *x386, *x38a, *x38e; const u32 *x392, *x396, *x39a, *x39e;
- const u16 addr[6] = { 0x31c, 0x374, 0x3a2, 0x3d0, 0x3fe, 0x42c }; - u8 bit[6] = { 0, 0, 1, 1, 0, 0 }; + const u16 addr[6] = {0x31c, 0x374, 0x3a2, 0x3d0, 0x3fe, 0x42c}; + u8 bit[6] = {0, 0, 1, 1, 0, 0};
if (s->spd_type == DDR2) { x32a = ddr2_x32a; @@ -1110,45 +1076,29 @@ FOR_EACH_POPULATED_CHANNEL(s->dimms, i) { for (j = 0; j < 6; j++) { if (j == 0) { - MCHBAR32_AND_OR(0x400*i + addr[j], ~0xff000, - 0xaa000); - MCHBAR16_AND_OR(0x400*i + 0x320, ~0xffff, - 0x6666); + MCHBAR32_AND_OR(0x400*i + addr[j], ~0xff000, 0xaa000); + MCHBAR16_AND_OR(0x400*i + 0x320, ~0xffff, 0x6666); for (k = 0; k < 8; k++) { - MCHBAR32_AND_OR(0x400*i + addr[j] + - 0xe + (k << 2), - ~0x3f3f3f3f, x32a[k]); - MCHBAR32_AND_OR(0x400*i + addr[j] + - 0x2e + (k << 2), - ~0x3f3f3f3f, x32a[k]); + MCHBAR32_AND_OR(0x400*i + addr[j] + 0xe + (k << 2), + ~0x3f3f3f3f, x32a[k]); + MCHBAR32_AND_OR(0x400*i + addr[j] + 0x2e + (k << 2), + ~0x3f3f3f3f, x32a[k]); } } else { - MCHBAR16_AND_OR(0x400*i + addr[j], - ~0xf000, 0xa000); - MCHBAR16_AND_OR(0x400*i + addr[j] + 4, - ~0xffff, x378[j]); - MCHBAR32_AND_OR(0x400*i + addr[j] + 0xe, - ~0x3f3f3f3f, x382[j]); - MCHBAR32_AND_OR(0x400*i + addr[j] + 0x12, - ~0x3f3f3f3f, x386[j]); - MCHBAR32_AND_OR(0x400*i + addr[j] + 0x16, - ~0x3f3f3f3f, x38a[j]); - MCHBAR32_AND_OR(0x400*i + addr[j] + 0x1a, - ~0x3f3f3f3f, x38e[j]); - MCHBAR32_AND_OR(0x400*i + addr[j] + 0x1e, - ~0x3f3f3f3f, x392[j]); - MCHBAR32_AND_OR(0x400*i + addr[j] + 0x22, - ~0x3f3f3f3f, x396[j]); - MCHBAR32_AND_OR(0x400*i + addr[j] + 0x26, - ~0x3f3f3f3f, x39a[j]); - MCHBAR32_AND_OR(0x400*i + addr[j] + 0x2a, - ~0x3f3f3f3f, x39e[j]); + MCHBAR16_AND_OR(0x400*i + addr[j], ~0xf000, 0xa000); + MCHBAR16_AND_OR(0x400*i + addr[j] + 4, ~0xffff, x378[j]); + MCHBAR32_AND_OR(0x400*i + addr[j] + 0x0e, ~0x3f3f3f3f, x382[j]); + MCHBAR32_AND_OR(0x400*i + addr[j] + 0x12, ~0x3f3f3f3f, x386[j]); + MCHBAR32_AND_OR(0x400*i + addr[j] + 0x16, ~0x3f3f3f3f, x38a[j]); + MCHBAR32_AND_OR(0x400*i + addr[j] + 0x1a, ~0x3f3f3f3f, x38e[j]); + MCHBAR32_AND_OR(0x400*i + addr[j] + 0x1e, ~0x3f3f3f3f, x392[j]); + MCHBAR32_AND_OR(0x400*i + addr[j] + 0x22, ~0x3f3f3f3f, x396[j]); + MCHBAR32_AND_OR(0x400*i + addr[j] + 0x26, ~0x3f3f3f3f, x39a[j]); + MCHBAR32_AND_OR(0x400*i + addr[j] + 0x2a, ~0x3f3f3f3f, x39e[j]); } - if (s->spd_type == DDR3 && - BOTH_DIMMS_ARE_POPULATED(s->dimms, i)) { - MCHBAR16_AND_OR(0x378 + 0x400 * i, - ~0xffff, 0xcccc); - } + if (s->spd_type == DDR3 && BOTH_DIMMS_ARE_POPULATED(s->dimms, i)) + MCHBAR16_AND_OR(0x378 + 0x400 * i, ~0xffff, 0xcccc); + MCHBAR8_AND_OR(0x400*i + addr[j], ~1, bit[j]); } reg8 = (s->spd_type == DDR2) ? 0x12 : 0x36; @@ -1214,15 +1164,11 @@
FOR_EACH_POPULATED_CHANNEL(s->dimms, i) { if (s->spd_type == DDR2) { - MCHBAR16(0x400 * i + 0x298) = - ddr2_odt[s->dimm_config[i]][1]; - MCHBAR16(0x400 * i + 0x294) = - ddr2_odt[s->dimm_config[i]][0]; + MCHBAR16(0x400 * i + 0x298) = ddr2_odt[s->dimm_config[i]][1]; + MCHBAR16(0x400 * i + 0x294) = ddr2_odt[s->dimm_config[i]][0]; } else { - MCHBAR16(0x400 * i + 0x298) = - ddr3_odt[s->dimm_config[i]][1]; - MCHBAR16(0x400 * i + 0x294) = - ddr3_odt[s->dimm_config[i]][0]; + MCHBAR16(0x400 * i + 0x298) = ddr3_odt[s->dimm_config[i]][1]; + MCHBAR16(0x400 * i + 0x294) = ddr3_odt[s->dimm_config[i]][0]; } u16 reg16 = MCHBAR16(0x400*i + 0x29c); reg16 &= ~0xfff; @@ -1275,7 +1221,7 @@ MCHBAR16(C1DRB0) = 0x0002; MCHBAR16(C1DRB1) = 0x0004; MCHBAR16(C1DRB2) = 0x0006; - /* In stacked mode the last present rank on ch1 needs to have its + /* In stacked mode, the last present rank on ch1 needs to have its size doubled in c1drbx */ MCHBAR16(C1DRB3) = 0x0010; MCHBAR8_OR(0x111, STACKED_MEM); @@ -1320,16 +1266,13 @@ u8 data8 = cmd; u32 data32;
- if (s->spd_type == DDR3 && (r & 1) - && s->dimms[ch * 2 + (r >> 1)].mirrored) { + if (s->spd_type == DDR3 && (r & 1) && s->dimms[ch * 2 + (r >> 1)].mirrored) data8 = (u8)mirror_shift_bit(data8, 4); - }
MCHBAR8_AND_OR(0x271, ~0x3e, data8); MCHBAR8_AND_OR(0x671, ~0x3e, data8); data32 = val; - if (s->spd_type == DDR3 && (r & 1) - && s->dimms[ch * 2 + (r >> 1)].mirrored) { + if (s->spd_type == DDR3 && (r & 1) && s->dimms[ch * 2 + (r >> 1)].mirrored) { data32 = mirror_shift_bit(data32, 3); data32 = mirror_shift_bit(data32, 5); data32 = mirror_shift_bit(data32, 7); @@ -1474,11 +1417,9 @@ MCHBAR32(0x400 * channel + 0x248) = reg32;
FOR_EACH_BYTELANE(lane) { - medium |= s->rcven_t[channel].medium[lane] - << (lane * 2); - coarse_offset |= - (s->rcven_t[channel].coarse_offset[lane] & 0x3) - << (lane * 2); + medium |= s->rcven_t[channel].medium[lane] << (lane * 2); + coarse_offset |= (s->rcven_t[channel].coarse_offset[lane] & 0x3) + << (lane * 2);
pi_tap = MCHBAR8(0x400 * channel + 0x560 + lane * 4); pi_tap &= ~0x7f; @@ -1575,11 +1516,9 @@ } }
- if (ONLY_DIMMA_IS_POPULATED(s->dimms, 0) || - ONLY_DIMMB_IS_POPULATED(s->dimms, 0)) + if (ONLY_DIMMA_IS_POPULATED(s->dimms, 0) || ONLY_DIMMB_IS_POPULATED(s->dimms, 0)) MCHBAR8_OR(0x260, 1); - if (ONLY_DIMMA_IS_POPULATED(s->dimms, 1) || - ONLY_DIMMB_IS_POPULATED(s->dimms, 1)) + if (ONLY_DIMMA_IS_POPULATED(s->dimms, 1) || ONLY_DIMMB_IS_POPULATED(s->dimms, 1)) MCHBAR8_OR(0x660, 1);
// DRB @@ -1679,26 +1618,22 @@
/* * "108h[15:0] Single Channel Offset for Ch0" - * This is the 'limit' of the part on CH0 that cannot be matched - * with memory on CH1. MCHBAR16(0x10a) is where the dual channel - * memory on ch0s end and MCHBAR16(0x108) is the limit of the single - * channel size on ch0. + * This is the 'limit' of the part on CH0 that cannot be matched with memory on CH1. + * MCHBAR16(0x10a) is where the dual channel memory on ch0s end. + * MCHBAR16(0x108) is the limit of the single channel size on ch0. */ if (s->stacked_mode && size_ch1 != 0) { single_channel_offset = 0; } else if (size_me == 0) { if (size_ch0 > size_ch1) - single_channel_offset = dual_channel_size / 2 - + single_channel_size; + single_channel_offset = dual_channel_size / 2 + single_channel_size; else single_channel_offset = dual_channel_size / 2; } else { if ((size_ch0 > size_ch1) && ((map & 0x7) == 4)) - single_channel_offset = dual_channel_size / 2 - + single_channel_size; + single_channel_offset = dual_channel_size / 2 + single_channel_size; else - single_channel_offset = dual_channel_size / 2 - + size_me; + single_channel_offset = dual_channel_size / 2 + size_me; }
MCHBAR16(0x108) = single_channel_offset; @@ -1712,9 +1647,8 @@ u32 gfxbase, gttbase, tsegbase, reclaimbase, reclaimlimit; u32 mmiostart, umasizem; u16 ggc; - u16 ggc2uma[] = { 0, 1, 4, 8, 16, 32, 48, 64, 128, 256, 96, - 160, 224, 352 }; - u8 ggc2gtt[] = { 0, 1, 0, 2, 0, 0, 0, 0, 0, 2, 3, 4}; + u16 ggc2uma[] = {0, 1, 4, 8, 16, 32, 48, 64, 128, 256, 96, 160, 224, 352}; + u8 ggc2gtt[] = {0, 1, 0, 2, 0, 0, 0, 0, 0, 2, 3, 4}; u8 reg8;
ggc = pci_read_config16(PCI_DEV(0, 0, 0), 0x52); @@ -1751,10 +1685,8 @@ pci_write_config16(PCI_DEV(0, 0, 0), 0xb0, tolud << 4); pci_write_config16(PCI_DEV(0, 0, 0), 0xa0, tom >> 6); if (reclaim) { - pci_write_config16(PCI_DEV(0, 0, 0), 0x98, - (u16)(reclaimbase >> 6)); - pci_write_config16(PCI_DEV(0, 0, 0), 0x9a, - (u16)(reclaimlimit >> 6)); + pci_write_config16(PCI_DEV(0, 0, 0), 0x98, (u16)(reclaimbase >> 6)); + pci_write_config16(PCI_DEV(0, 0, 0), 0x9a, (u16)(reclaimlimit >> 6)); } pci_write_config16(PCI_DEV(0, 0, 0), 0xa2, touud); pci_write_config32(PCI_DEV(0, 0, 0), 0xa4, gfxbase << 20); @@ -2017,8 +1949,7 @@ MCHBAR8_OR(0x1a8, 0x4);
// Set freq - MCHBAR32_AND_OR(0xc00, ~0x70, - (s->selected_timings.mem_clk << 4) | (1 << 10)); + MCHBAR32_AND_OR(0xc00, ~0x70, (s->selected_timings.mem_clk << 4) | (1 << 10));
// Overwrite freq if chipset rejects it s->selected_timings.mem_clk = (MCHBAR8(0xc00) & 0x70) >> 4; @@ -2124,18 +2055,14 @@ FOR_EACH_POPULATED_CHANNEL(s->dimms, ch) { reg32 = (2 << 18); reg32 |= post_jedec_tab[s->selected_timings.fsb_clk] - [s->selected_timings.mem_clk - MEM_CLOCK_667MHz][0] - << 13; + [s->selected_timings.mem_clk - MEM_CLOCK_667MHz][0] << 13; if (s->selected_timings.mem_clk == MEM_CLOCK_667MHz && - s->selected_timings.fsb_clk == FSB_CLOCK_1066MHz && - ch == 1) { + s->selected_timings.fsb_clk == FSB_CLOCK_1066MHz && ch == 1) { reg32 |= (post_jedec_tab[s->selected_timings.fsb_clk] - [s->selected_timings.mem_clk - MEM_CLOCK_667MHz][1] - - 1) << 8; + [s->selected_timings.mem_clk - MEM_CLOCK_667MHz][1] - 1) << 8; } else { reg32 |= post_jedec_tab[s->selected_timings.fsb_clk] - [s->selected_timings.mem_clk - MEM_CLOCK_667MHz][1] - << 8; + [s->selected_timings.mem_clk - MEM_CLOCK_667MHz][1] << 8; } MCHBAR32_AND_OR(0x400*ch + 0x274, ~0xfff00, reg32); MCHBAR8_AND(0x400*ch + 0x274, ~0x80); @@ -2176,18 +2103,15 @@ volatile u32 data; FOR_EACH_POPULATED_RANK(s->dimms, ch, r) { for (bank = 0; bank < 4; bank++) { - reg32 = test_address(ch, r) | - (bank << 12); + reg32 = test_address(ch, r) | (bank << 12); write32((u32 *)reg32, 0xffffffff); data = read32((u32 *)reg32); printk(BIOS_DEBUG, "Wrote ones,"); - printk(BIOS_DEBUG, " Read: [0x%08x]=0x%08x\n", - reg32, data); + printk(BIOS_DEBUG, " Read: [0x%08x]=0x%08x\n", reg32, data); write32((u32 *)reg32, 0x00000000); data = read32((u32 *)reg32); printk(BIOS_DEBUG, "Wrote zeros,"); - printk(BIOS_DEBUG, " Read: [0x%08x]=0x%08x\n", - reg32, data); + printk(BIOS_DEBUG, " Read: [0x%08x]=0x%08x\n", reg32, data); } } } @@ -2232,11 +2156,9 @@ * and is only needed in case of ME being used. */ if (ME_UMA_SIZEMB != 0) { - if (RANK_IS_POPULATED(s->dimms, 0, 0) - || RANK_IS_POPULATED(s->dimms, 1, 0)) + if (RANK_IS_POPULATED(s->dimms, 0, 0) || RANK_IS_POPULATED(s->dimms, 1, 0)) MCHBAR8_OR(0xa2f, 1 << 0); - if (RANK_IS_POPULATED(s->dimms, 0, 1) - || RANK_IS_POPULATED(s->dimms, 1, 1)) + if (RANK_IS_POPULATED(s->dimms, 0, 1) || RANK_IS_POPULATED(s->dimms, 1, 1)) MCHBAR8_OR(0xa2f, 1 << 1); MCHBAR32_OR(0xa30, 1 << 26); } diff --git a/src/northbridge/intel/x4x/rcven.c b/src/northbridge/intel/x4x/rcven.c index 36a6ebd..a1e3894 100644 --- a/src/northbridge/intel/x4x/rcven.c +++ b/src/northbridge/intel/x4x/rcven.c @@ -55,8 +55,7 @@ return (MCHBAR8(sample_offset) >> 6) & 1; }
-static void program_timing(const struct rec_timing *timing, u8 channel, - u8 lane) +static void program_timing(const struct rec_timing *timing, u8 channel, u8 lane) { u32 reg32; u16 reg16; @@ -135,15 +134,12 @@ return 0; }
-static int decr_coarse_low(u8 channel, u8 lane, u32 addr, - struct rec_timing *timing) +static int decr_coarse_low(u8 channel, u8 lane, u32 addr, struct rec_timing *timing) { - printk(RAM_DEBUG, - " Decreasing coarse until high to low transition is found\n"); + printk(RAM_DEBUG, " Decreasing coarse until high to low transition is found\n"); while (sampledqs(addr, lane, channel) != DQS_LOW) { if (timing->coarse == 0) { - printk(BIOS_CRIT, - "Couldn't find DQS-high 0 indicator, halt\n"); + printk(BIOS_CRIT, "Couldn't find DQS-high 0 indicator, halt\n"); return -1; } timing->coarse--; @@ -154,31 +150,24 @@ return 0; }
-static int fine_search_dqs_high(u8 channel, u8 lane, u32 addr, - struct rec_timing *timing) +static int fine_search_dqs_high(u8 channel, u8 lane, u32 addr, struct rec_timing *timing) { - printk(RAM_DEBUG, - " Increasing TAP until low to high transition is found\n"); + printk(RAM_DEBUG, " Increasing TAP until low to high transition is found\n"); /* - * We use a do while loop since it happens that the strobe read - * is inconsistent, with the strobe already high. The current - * code flow results in failure later when finding the preamble, - * at which DQS needs to be high is often not the case if TAP was - * not increased at least once here. Work around this by incrementing - * TAP at least once to guarantee searching for preamble start at - * DQS high. - * This seems to be the result of hysteresis on some settings, where - * the DQS probe is influenced by its previous value. + * We use a do-while loop, since it happens that the strobe read is inconsistent, with + * the strobe already high. The current code flow results in failure later when finding + * the preamble, at which DQS needs to be high and is often not the case if TAP was not + * increased at least once here. Work around this by incrementing TAP at least once to + * guarantee searching for preamble start at DQS high. This seems to be the result of + * hysteresis on some settings, where the DQS probe is influenced by its previous value. */ if (sampledqs(addr, lane, channel) == DQS_HIGH) { - printk(BIOS_WARNING, - "DQS already HIGH... DQS probe is inconsistent!\n" + printk(BIOS_WARNING, "DQS already HIGH... DQS probe is inconsistent!\n" "Continuing....\n"); } do { if (increase_tap(timing)) { - printk(BIOS_CRIT, - "Could not find DQS-high on fine search.\n"); + printk(BIOS_CRIT, "Could not find DQS-high on fine search.\n"); return -1; } program_timing(timing, channel, lane); @@ -189,15 +178,13 @@ return 0; }
-static int find_dqs_low(u8 channel, u8 lane, u32 addr, - struct rec_timing *timing) +static int find_dqs_low(u8 channel, u8 lane, u32 addr, struct rec_timing *timing) { /* Look for DQS low, using quarter steps. */ printk(RAM_DEBUG, " Increasing medium until DQS LOW is found\n"); while (sampledqs(addr, lane, channel) != DQS_LOW) { if (increase_medium(timing)) { - printk(BIOS_CRIT, - "Coarse > 15: DQS tuning failed, halt\n"); + printk(BIOS_CRIT, "Coarse > 15: DQS tuning failed, halt\n"); return -1; } program_timing(timing, channel, lane); @@ -206,15 +193,13 @@ timing->coarse, timing->medium); return 0; } -static int find_dqs_high(u8 channel, u8 lane, u32 addr, - struct rec_timing *timing) +static int find_dqs_high(u8 channel, u8 lane, u32 addr, struct rec_timing *timing) { /* Look for DQS high, using quarter steps. */ printk(RAM_DEBUG, " Increasing medium until DQS HIGH is found\n"); while (sampledqs(addr, lane, channel) != DQS_HIGH) { if (increase_medium(timing)) { - printk(BIOS_CRIT, - "Coarse > 16: DQS tuning failed, halt\n"); + printk(BIOS_CRIT, "Coarse > 16: DQS tuning failed, halt\n"); return -1; } program_timing(timing, channel, lane); @@ -224,8 +209,7 @@ return 0; }
-static int find_dqs_edge_lowhigh(u8 channel, u8 lane, - u32 addr, struct rec_timing *timing) +static int find_dqs_edge_lowhigh(u8 channel, u8 lane, u32 addr, struct rec_timing *timing) { /* Medium search for DQS high. */ if (find_dqs_high(channel, lane, addr, timing)) @@ -241,8 +225,7 @@ return 0; }
-static int find_preamble(u8 channel, u8 lane, u32 addr, - struct rec_timing *timing) +static int find_preamble(u8 channel, u8 lane, u32 addr, struct rec_timing *timing) { /* Add a quarter step */ if (increase_medium(timing)) @@ -260,8 +243,7 @@ return 0; }
-static int calibrate_receive_enable(u8 channel, u8 lane, - u32 addr, struct rec_timing *timing) +static int calibrate_receive_enable(u8 channel, u8 lane, u32 addr, struct rec_timing *timing) { program_timing(timing, channel, lane); /* Set receive enable bit */ @@ -308,10 +290,7 @@ { int rank; u8 channel, lane, reg8; - /* - * Using the macros below the compiler warns about this possibly being - * unitialised. - */ + /* Using the macros below, the compiler warns about this possibly being unitialised. */ u32 addr = 0; struct rec_timing timing[TOTAL_BYTELANES]; u8 mincoarse; @@ -357,8 +336,7 @@ timing[lane].tap = 0; timing[lane].pi = 0;
- if (calibrate_receive_enable(channel, lane, addr, - &timing[lane])) + if (calibrate_receive_enable(channel, lane, addr, &timing[lane])) die("Receive enable calibration failed\n"); if (mincoarse > timing[lane].coarse) mincoarse = timing[lane].coarse; @@ -372,10 +350,9 @@ reg8 = 0; else reg8 = timing[lane].coarse - mincoarse; - printk(BIOS_DEBUG, "ch %d lane %d: coarse offset: %d;" - "medium: %d; tap: %d\n", - channel, lane, reg8, timing[lane].medium, - timing[lane].tap); + printk(BIOS_DEBUG, + "ch %d lane %d: coarse offset: %d; medium: %d; tap: %d\n", + channel, lane, reg8, timing[lane].medium, timing[lane].tap); s->rcven_t[channel].coarse_offset[lane] = reg8; s->rcven_t[channel].medium[lane] = timing[lane].medium; s->rcven_t[channel].tap[lane] = timing[lane].tap;
build bot (Jenkins) has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/35156 )
Change subject: [WIP] nb/intel/x4x/raminit: Do cosmetic fixes ......................................................................
Patch Set 1:
(8 comments)
https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... File src/northbridge/intel/x4x/raminit_ddr23.c:
https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... PS1, Line 1024: const u32 ddr2_x382[6] = {0, 0x02020202, 0x02020202, 0x02020202, 0x04030303, 0x04030303};
line over 96 characters

https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... PS1, Line 1025: const u32 ddr2_x386[6] = {0, 0x03020202, 0x03020202, 0x03020202, 0x05040404, 0x05040404};
line over 96 characters

https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... PS1, Line 1026: const u32 ddr2_x38a[6] = {0, 0x04040303, 0x04040303, 0x04040303, 0x07070605, 0x07070605};
line over 96 characters

https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... PS1, Line 1027: const u32 ddr2_x38e[6] = {0, 0x06060505, 0x06060505, 0x06060505, 0x09090808, 0x09090808};
line over 96 characters

https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... PS1, Line 1028: const u32 ddr2_x392[6] = {0, 0x02020202, 0x02020202, 0x02020202, 0x03030202, 0x03030202};
line over 96 characters

https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... PS1, Line 1029: const u32 ddr2_x396[6] = {0, 0x03030202, 0x03030202, 0x03030202, 0x05040303, 0x05040303};
line over 96 characters

https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... PS1, Line 1030: const u32 ddr2_x39a[6] = {0, 0x04040403, 0x04040403, 0x04040403, 0x07070605, 0x07070605};
line over 96 characters

https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... PS1, Line 1031: const u32 ddr2_x39e[6] = {0, 0x06060505, 0x06060505, 0x06060505, 0x08080808, 0x08080808};
line over 96 characters
Angel Pons has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/35156 )
Change subject: [WIP] nb/intel/x4x/raminit: Do cosmetic fixes ......................................................................
Patch Set 1:
(1 comment)
https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... File src/northbridge/intel/x4x/raminit_ddr23.c:
https://review.coreboot.org/c/coreboot/+/35156/1/src/northbridge/intel/x4x/r... PS1, Line 1024: const u32 ddr2_x382[6] = {0, 0x02020202, 0x02020202, 0x02020202, 0x04030303, 0x04030303};
line over 96 characters
Oh shut up, IT'S JUST ONE CHARACTERRRRR!
Hello Patrick Rudolph, build bot (Jenkins), Damien Zammit,
I'd like you to reexamine a change. Please visit
https://review.coreboot.org/c/coreboot/+/35156
to look at the new patch set (#2).
Change subject: nb/intel/x4x: Do cosmetic fixes ......................................................................
nb/intel/x4x: Do cosmetic fixes
This is mostly line reflowing to make use of the increased line length limit of 96 characters.
Tested with BUILD_TIMELESS=1, does not change the binary of Asus P5QL.
Change-Id: Ie04ec4e493fc2c45e25521869c2f4e5b5a8d26cc
Signed-off-by: Angel Pons <th3fanbus@gmail.com>
---
M src/northbridge/intel/x4x/acpi.c
M src/northbridge/intel/x4x/dq_dqs.c
M src/northbridge/intel/x4x/early_init.c
M src/northbridge/intel/x4x/gma.c
M src/northbridge/intel/x4x/memmap.c
M src/northbridge/intel/x4x/northbridge.c
M src/northbridge/intel/x4x/raminit.c
M src/northbridge/intel/x4x/raminit_ddr23.c
M src/northbridge/intel/x4x/raminit_tables.c
M src/northbridge/intel/x4x/rcven.c
M src/northbridge/intel/x4x/romstage.c
M src/northbridge/intel/x4x/x4x.h
12 files changed, 1,057 insertions(+), 968 deletions(-)
git pull ssh://review.coreboot.org:29418/coreboot refs/changes/56/35156/2
build bot (Jenkins) has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/35156 )
Change subject: nb/intel/x4x: Do cosmetic fixes ......................................................................
Patch Set 2:
(2 comments)
https://review.coreboot.org/c/coreboot/+/35156/2/src/northbridge/intel/x4x/r... File src/northbridge/intel/x4x/raminit_ddr23.c:
https://review.coreboot.org/c/coreboot/+/35156/2/src/northbridge/intel/x4x/r... PS2, Line 1320: if (s->spd_type == DDR3 && (r & 1) && s->dimms[ch * 2 + (r >> 1)].mirrored) {
braces {} are not necessary for single statement blocks
https://review.coreboot.org/c/coreboot/+/35156/2/src/northbridge/intel/x4x/x... File src/northbridge/intel/x4x/x4x.h:
https://review.coreboot.org/c/coreboot/+/35156/2/src/northbridge/intel/x4x/x... PS2, Line 189: #define IF_DIMM_POPULATED(dimms, idx) if (DIMM_IS_POPULATED(dimms, idx))
Macros starting with if should be enclosed by a do - while loop to avoid possible if/else logic defects
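For readers unfamiliar with that checkpatch warning: it refers to the dangling-else hazard of macros that expand to a bare `if`. The snippet below is a minimal, self-contained illustration written for this discussion, not coreboot code; the bitmask encoding of `dimms` and the two printf calls are made up, and only the shape of the macro mirrors the one quoted above. The point is that an `else` written after the macro's statement binds to the `if` hidden inside the macro rather than to the enclosing `if`.

#include <stdio.h>

#define DIMM_IS_POPULATED(dimms, idx)  ((dimms) & (1 << (idx)))  /* stand-in encoding */
#define IF_DIMM_POPULATED(dimms, idx)  if (DIMM_IS_POPULATED(dimms, idx))

static void demo(int have_spd, int dimms, int idx)
{
        if (have_spd)
                IF_DIMM_POPULATED(dimms, idx)
                        printf("DIMM %d: decode SPD\n", idx);
        else    /* binds to the macro's hidden if, not to if (have_spd) */
                printf("no SPD data\n");
}

int main(void)
{
        demo(0, 0x1, 0);        /* prints nothing, even though "no SPD data" looks intended */
        return 0;
}

In the x4x code the macro is only ever followed by a plain block, so nothing misbehaves in practice, which is presumably why the warning was left unaddressed.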
Arthur Heymans has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/35156 )
Change subject: nb/intel/x4x: Do cosmetic fixes ......................................................................
Patch Set 2:
(1 comment)
This seems to add a lot of newlines?
https://review.coreboot.org/c/coreboot/+/35156/2/src/northbridge/intel/x4x/d... File src/northbridge/intel/x4x/dq_dqs.c:
https://review.coreboot.org/c/coreboot/+/35156/2/src/northbridge/intel/x4x/d... PS2, Line 100:
Why are there newlines here?
Angel Pons has posted comments on this change. ( https://review.coreboot.org/c/coreboot/+/35156 )
Change subject: nb/intel/x4x: Do cosmetic fixes ......................................................................
Patch Set 2:
(1 comment)
https://review.coreboot.org/c/coreboot/+/35156/2/src/northbridge/intel/x4x/d... File src/northbridge/intel/x4x/dq_dqs.c:
https://review.coreboot.org/c/coreboot/+/35156/2/src/northbridge/intel/x4x/d... PS2, Line 100:
Why are there newlines here?
When a bunch of lines are packed together, I tend to mix them up: while reading, I jump to the line immediately above or below without realizing. The newlines chop this wall of text into sentence-like blocks:
98: If pi is less than 6, then increment pi by 1.
101: Else, if tap is less than max_tap_val, then set pi to 0 and increment tap by 1.
105: Else, if clk_delay is less than 2, then set pi and tap to 0 and increment clk_delay by 1.
110: Else, if coarse is less than 1, then set pi and tap to 0, decrement clk_delay by 1 and increment coarse by 1.
116: Otherwise, return CB_ERR.
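Spelled out as code, the walkthrough above is a carry chain over the delay fields of a dll_setting: pi is the finest step, then tap, then clk_delay, then coarse. The sketch below is a self-contained paraphrase of that flow for this discussion, not a drop-in copy of the coreboot function; the standalone struct, the enum values and the explicit max_tap_val parameter are simplifications (the real code reads the limit from the max_tap table via struct sysinfo, and the struct also carries db_en/db_sel).

/* Illustrative paraphrase of the increment_dq_dqs() stepping described above. */
enum { CB_SUCCESS = 0, CB_ERR = -1 };

struct dll_setting {
        unsigned int coarse;
        unsigned int clk_delay;
        unsigned int tap;
        unsigned int pi;
        /* db_en and db_sel omitted; they are not part of the stepping itself */
};

static int increment_dq_dqs_sketch(struct dll_setting *d, unsigned int max_tap_val)
{
        if (d->pi < 6) {                        /* finest step: bump the phase interpolator */
                d->pi += 1;
        } else if (d->tap < max_tap_val) {      /* carry into tap */
                d->pi = 0;
                d->tap += 1;
        } else if (d->clk_delay < 2) {          /* carry into the clock delay */
                d->pi = 0;
                d->tap = 0;
                d->clk_delay += 1;
        } else if (d->coarse < 1) {             /* last resort: back off clk_delay, bump coarse */
                d->pi = 0;
                d->tap = 0;
                d->clk_delay -= 1;
                d->coarse += 1;
        } else {
                return CB_ERR;                  /* whole delay range exhausted */
        }
        return CB_SUCCESS;
}

The blank lines in the original separate exactly these carry cases, which is what the reply above is getting at. The caller (find_dq_limit) keeps stepping until the test pattern flips from passing to failing or vice versa, and a CB_ERR return means the delay range ran out before that happened.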
Angel Pons has abandoned this change. ( https://review.coreboot.org/c/coreboot/+/35156 )
Change subject: nb/intel/x4x: Do cosmetic fixes ......................................................................
Abandoned