***************************************************************
***************************************************************
*** Project title:    Estimating the causal effect of collective bargaining on the wage distribution
*** Data basis:       Gehalt- und Lohnstrukturerhebung (GLS) 2001 and Verdienststrukturerhebung (VSE) 2006
***
*** Program file:     02_Fairlie_Ost.do
*** Log file:         04_Fairlie_Ost.log
*** Ado-file:         04_Fairlieselbst_ost.ado
***
*** Author:           Katrin Sommerfeld
*** E-mail:           sommerfeld@zew.de
*** Phone:            0621-1235 216
***
***
*** Outline of the program:
***
***
*********************************************************
*
* 02 _ Fairlie decomposition, programmed by hand here
*
* 0. Preparation: generate sample and matrices
* 1. Simple decomposition:     a: prepare, b: display
* 2. Sequential decomposition: a: prepare, b: display
*
**********************************************************

display "ado: 02 Fairlie selbst programmiert, mit MEs"

capture program drop fairlieselbst
program define fairlieselbst
set rmsg on

**************************************************************************
*** 0. Preparation: sample

quietly probit cover $varlist_dm if year == 2001
gen dsample_01 = 1 if e(sample)
quietly probit cover $varlist_dm if year == 2006
gen dsample_06 = 1 if e(sample)
keep if dsample_01 == 1 | dsample_06 == 1

gen dtreat=.
replace dtreat=0 if dsample_01==1
replace dtreat=1 if dsample_06==1
tab dsample_01 dsample_06, m
tab dtreat, m

gen one = 1

********************************************************************
*** 1.a Probits: prepare simple predictions, i.e. Xj*bj
display "1.a Probits"

/* 2001 */
quietly probit cover $varlist_dm if year == 2001 [pweight=weight], vce(robust)
matrix b_01 = e(b)
matrix list b_01
svmat b_01, names(b_01)
display "Check whether only one value, before filling up:"
sum b_01*
qui gen b01_0 = _b[_cons]
forvalues X = 1/42 {
	egen b01_`X' = total(b_01`X')
}
margins, dydx(*)

/* 2006 */
quietly probit cover $varlist_dm if year == 2006 [pweight=weight], vce(robust)
matrix b_06 = e(b)
matrix list b_06
svmat b_06, names(b_06)
display "Check whether only one value, before filling up:"
sum b_06*
qui gen b06_0 = _b[_cons]
forvalues X = 1/42 {
	egen b06_`X' = total(b_06`X')
}
margins, dydx(*)

/* Observed predictions */
* based on varlist: "educ_low educ_high educ_na age tenure size1 size2 size3 size4 male region1 region2 region4 region5 region6 region7 w1 w2 w3 w4 w5 w6 w7 w8 w10 w11 w12 w13 w14 w15 w16 w17 w18 w19 w20 w21 w22 w23 w24 w25 w26 w27 w28 " // public
* (note: the East specification below uses region8 region10 region11 region12 instead of region1-region7)

display "Predicted value 2001"
qui gen p_x01_b01 = b01_0*one + b_01[1,1]*educ_low_dm + b_01[1,2]*educ_high_dm + b_01[1,3]*educ_na_dm + b_01[1,4]*age_dm + b_01[1,5]*tenure_dm + b_01[1,6]*size1_dm + b_01[1,7]*size2_dm + b_01[1,8]*size3_dm + b_01[1,9]*size4_dm + b_01[1,10]*male_dm + b_01[1,11]*region8_dm + b_01[1,12]*region10_dm + b_01[1,13]*region11_dm + b_01[1,14]*region12_dm + b_01[1,15]*w1_dm + b_01[1,16]*w2_dm + b_01[1,17]*w3_dm + b_01[1,18]*w4_dm + b_01[1,19]*w5_dm + b_01[1,20]*w6_dm + b_01[1,21]*w7_dm + b_01[1,22]*w8_dm + b_01[1,23]*w10_dm + b_01[1,24]*w11_dm + b_01[1,25]*w12_dm + b_01[1,26]*w13_dm + b_01[1,27]*w14_dm + b_01[1,28]*w15_dm + b_01[1,29]*w16_dm + b_01[1,30]*w17_dm + b_01[1,31]*w18_dm + b_01[1,32]*w19_dm + b_01[1,33]*w20_dm + b_01[1,34]*w21_dm + b_01[1,35]*w22_dm + b_01[1,36]*w23_dm + b_01[1,37]*w24_dm + b_01[1,38]*w25_dm + b_01[1,39]*w26_dm + b_01[1,40]*w27_dm + b_01[1,41]*w28_dm if dsample_01==1
qui gen np_x01_b01 = normal(p_x01_b01) // normal CDF of the linear index (matrix product)
tabstat np_x01_b01 [aweight=weight] , stat(n mean sd p1 p5 p10 p25 p50 p75 p90 p95 p99)
qui sum np_x01_b01 [aweight=weight] // average estimated collective bargaining coverage rate
scalar app_x01_b01 = r(mean) /* average predicted probability for 2001 */
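* Optional consistency check (sketch, left commented out; assumes that the global
* $varlist_dm lists the demeaned regressors in exactly the order hard-coded above):
* the hand-built index normal(X*b) should then reproduce Stata's own predicted
* probabilities from the weighted 2001 probit.
*quietly probit cover $varlist_dm if year == 2001 [pweight=weight], vce(robust)
*quietly predict double pr01_check if dsample_01==1, pr
*assert reldif(pr01_check, np_x01_b01) < 1e-5 if dsample_01==1
*drop pr01_check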
display "Predicted value 2006"
* superseded earlier version (kept for reference):
*gen p_x06_b06 = b06_0*one + b06_1*educ_low + b06_2*educ_high + b06_3*educ_na + b06_4*age + b06_5*tenure + b06_6*size1 + b06_7*size2 + b06_8*size3 + b06_9*size4 + b06_10*male + b06_11*region1 + b06_12*region2 + b06_13*region4 + b06_14*region5 + b06_15*region6 + b06_16*region7 + b06_17*w1 + b06_18*w2 + b06_19*w3 + b06_20*w4 + b06_21*w5 + b06_22*w6 + b06_23*w7 + b06_24*w8 + b06_25*w10 + b06_26*w11 + b06_27*w12 + b06_28*w13 + b06_29*w14 + b06_30*w15 + b06_31*w16 + b06_32*w17 + b06_33*w18 + b06_34*w19 + b06_35*w20 + b06_36*w21 + b06_37*w22 + b06_38*w23 + b06_39*w24 + b06_40*w25 + b06_41*w26 + b06_42*w27 + b06_43*w28 if dsample_06==1
qui gen p_x06_b06 = b06_0*one + b_06[1,1]*educ_low_dm + b_06[1,2]*educ_high_dm + b_06[1,3]*educ_na_dm + b_06[1,4]*age_dm + b_06[1,5]*tenure_dm + b_06[1,6]*size1_dm + b_06[1,7]*size2_dm + b_06[1,8]*size3_dm + b_06[1,9]*size4_dm + b_06[1,10]*male_dm + b_06[1,11]*region8_dm + b_06[1,12]*region10_dm + b_06[1,13]*region11_dm + b_06[1,14]*region12_dm + b_06[1,15]*w1_dm + b_06[1,16]*w2_dm + b_06[1,17]*w3_dm + b_06[1,18]*w4_dm + b_06[1,19]*w5_dm + b_06[1,20]*w6_dm + b_06[1,21]*w7_dm + b_06[1,22]*w8_dm + b_06[1,23]*w10_dm + b_06[1,24]*w11_dm + b_06[1,25]*w12_dm + b_06[1,26]*w13_dm + b_06[1,27]*w14_dm + b_06[1,28]*w15_dm + b_06[1,29]*w16_dm + b_06[1,30]*w17_dm + b_06[1,31]*w18_dm + b_06[1,32]*w19_dm + b_06[1,33]*w20_dm + b_06[1,34]*w21_dm + b_06[1,35]*w22_dm + b_06[1,36]*w23_dm + b_06[1,37]*w24_dm + b_06[1,38]*w25_dm + b_06[1,39]*w26_dm + b_06[1,40]*w27_dm + b_06[1,41]*w28_dm if dsample_06==1
qui gen np_x06_b06 = normal(p_x06_b06) // normal CDF of the linear index (matrix product)
tabstat np_x06_b06 [aweight=weight], stat(n mean sd p1 p5 p10 p25 p50 p75 p90 p95 p99)
qui sum np_x06_b06 [aweight=weight] // average estimated collective bargaining coverage rate
scalar app_x06_b06 = r(mean) /* average predicted probability for 2006 */

/* Difference in predicted prob */
scalar diff = app_x06_b06 - app_x01_b01
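* The quantity decomposed below is diff, the 2006-2001 change in the average
* predicted coverage probability. The coarse twofold split in 1.b uses the
* counterfactual average prediction based on 2006 characteristics and 2001
* coefficients: Coef_Ef = P(x06*b06) - P(x06*b01) is the coefficients effect and
* Char_Ef = P(x06*b01) - P(x01*b01) the characteristics effect. The two terms add
* up to diff, so the residual computed below is zero by construction; the reverse
* direction swaps the reference year of the coefficients.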
*******************************************************************
*** 1.b Coarse decomposition
display "1.b Grobe Zerlegung"
display "1. Grobe Zerlegung: Original direction: x06 * b01"
qui gen p_x06_b01 = b01_0*one + b01_1*educ_low_dm + b01_2*educ_high_dm + b01_3*educ_na_dm + b01_4*age_dm + b01_5*tenure_dm + b01_6*size1_dm + b01_7*size2_dm + b01_8*size3_dm + b01_9*size4_dm + b01_10*male_dm + b01_11*region8_dm + b01_12*region10_dm + b01_13*region11_dm + b01_14*region12_dm + b01_15*w1_dm + b01_16*w2_dm + b01_17*w3_dm + b01_18*w4_dm + b01_19*w5_dm + b01_20*w6_dm + b01_21*w7_dm + b01_22*w8_dm + b01_23*w10_dm + b01_24*w11_dm + b01_25*w12_dm + b01_26*w13_dm + b01_27*w14_dm + b01_28*w15_dm + b01_29*w16_dm + b01_30*w17_dm + b01_31*w18_dm + b01_32*w19_dm + b01_33*w20_dm + b01_34*w21_dm + b01_35*w22_dm + b01_36*w23_dm + b01_37*w24_dm + b01_38*w25_dm + b01_39*w26_dm + b01_40*w27_dm + b01_41*w28_dm if dsample_06==1
qui gen n_x06_b01 = normal(p_x06_b01) // normal CDF of the linear index (matrix product)
tabstat n_x06_b01 [aweight=weight], stat(n mean sd p1 p5 p10 p25 p50 p75 p90 p95 p99)
qui sum n_x06_b01 [aweight=weight] // average estimated collective bargaining coverage rate
scalar app_x06_b01 = r(mean) /* average predicted probability */

scalar Coef_Ef = app_x06_b06 - app_x06_b01
scalar Char_Ef = app_x06_b01 - app_x01_b01
scalar Residual = diff - Char_Ef - Coef_Ef /* Residual = delta_3 per definition */
scalar Coef_Ef__prozent = Coef_Ef / diff * 100
scalar Char_Ef__prozent = Char_Ef / diff * 100
scalar Residual__prozent = Residual/ diff * 100 /* Residual = delta_3 per definition */

display "diff = " diff
display "Coef_Ef = " Coef_Ef " Prozent: " Coef_Ef__prozent
display "Char_Ef = " Char_Ef " Prozent: " Char_Ef__prozent
*display "Residual = " Residual " Prozent: " Residual__prozent

**************************************
display "1. Grobe Zerlegung: Reverse direction: x01 * b06"
qui gen p_x01_b06 = b06_0*one + b06_1*educ_low_dm + b06_2*educ_high_dm + b06_3*educ_na_dm + b06_4*age_dm + b06_5*tenure_dm + b06_6*size1_dm + b06_7*size2_dm + b06_8*size3_dm + b06_9*size4_dm + b06_10*male_dm + b06_11*region8_dm + b06_12*region10_dm + b06_13*region11_dm + b06_14*region12_dm + b06_15*w1_dm + b06_16*w2_dm + b06_17*w3_dm + b06_18*w4_dm + b06_19*w5_dm + b06_20*w6_dm + b06_21*w7_dm + b06_22*w8_dm + b06_23*w10_dm + b06_24*w11_dm + b06_25*w12_dm + b06_26*w13_dm + b06_27*w14_dm + b06_28*w15_dm + b06_29*w16_dm + b06_30*w17_dm + b06_31*w18_dm + b06_32*w19_dm + b06_33*w20_dm + b06_34*w21_dm + b06_35*w22_dm + b06_36*w23_dm + b06_37*w24_dm + b06_38*w25_dm + b06_39*w26_dm + b06_40*w27_dm + b06_41*w28_dm if dsample_01==1
qui gen np_x01_b06 = normal(p_x01_b06) // normal CDF of the linear index (matrix product)
tabstat np_x01_b06 [aweight=weight], stat(n mean sd p1 p5 p10 p25 p50 p75 p90 p95 p99)
qui sum np_x01_b06 [aweight=weight] // average estimated collective bargaining coverage rate
scalar app_x01_b06 = r(mean) /* average predicted probability */

scalar Char_Ef_rev = app_x06_b06 - app_x01_b06
scalar Coef_Ef_rev = app_x01_b06 - app_x01_b01
scalar Residual_rev = diff - Char_Ef_rev - Coef_Ef_rev /* Residual = delta_3 per definition */
scalar Char_Ef_rev__prozent = Char_Ef_rev / diff * 100
scalar Coef_Ef_rev__prozent = Coef_Ef_rev / diff * 100
scalar Residual_rev__prozent = Residual_rev/ diff * 100 /* Residual = delta_3 per definition */

display "diff = " diff
display "Coef_Ef reverse = " Coef_Ef_rev " Prozent: " Coef_Ef_rev__prozent
display "Char_Ef reverse = " Char_Ef_rev " Prozent: " Char_Ef_rev__prozent
*display "Residual reverse = " Residual_rev " Prozent: " Residual_rev__prozent
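* The sequential decomposition starts from the observed 2006 prediction and
* switches one block at a time from its 2006 to its 2001 value:
*   counter1-counter4: coefficient blocks (individual, firm, industry, constant),
*   counter5-counter6: characteristics, switched via kernel propensity-score
*                      matching (reweighting the 2001 sample).
* The step-by-step differences delta1 ... delta7 computed in 2.b telescope, so
* they sum exactly to diff.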
********************************************************************
*** 2.a Prepare sequential decomposition

/* First, compute the counterfactuals */

* Change beta_ind from 2006 to 2001 /*** for delta1 resp. delta2 ***/
* counter1 = xpersonal_06 * bpersonal_01' + xfirm_06 * bfirm_06' + xwz_06 * bwz_06' + mat_c06
qui gen counter1help = b06_0*one + b01_1*educ_low_dm + b01_2*educ_high_dm + b01_3*educ_na_dm + b01_4*age_dm + b01_5*tenure_dm + b06_6*size1_dm + b06_7*size2_dm + b06_8*size3_dm + b06_9*size4_dm + b06_10*male_dm + b06_11*region8_dm + b06_12*region10_dm + b06_13*region11_dm + b06_14*region12_dm + b06_15*w1_dm + b06_16*w2_dm + b06_17*w3_dm + b06_18*w4_dm + b06_19*w5_dm + b06_20*w6_dm + b06_21*w7_dm + b06_22*w8_dm + b06_23*w10_dm + b06_24*w11_dm + b06_25*w12_dm + b06_26*w13_dm + b06_27*w14_dm + b06_28*w15_dm + b06_29*w16_dm + b06_30*w17_dm + b06_31*w18_dm + b06_32*w19_dm + b06_33*w20_dm + b06_34*w21_dm + b06_35*w22_dm + b06_36*w23_dm + b06_37*w24_dm + b06_38*w25_dm + b06_39*w26_dm + b06_40*w27_dm + b06_41*w28_dm if dsample_06==1
qui gen n_counter1 = normal(counter1help)
tabstat n_counter1 [aweight=weight] , stat(n mean sd p1 p5 p10 p25 p50 p75 p90 p95 p99)
qui sum n_counter1 [aweight=weight]
scalar app_counter1 = r(mean)

* Change beta_firm from 2006 to 2001 /*** for delta2 resp. delta3 ***/
* counter2 = xpersonal_06 * bpersonal_01' + xfirm_06 * bfirm_01' + xwz_06 * bwz_06' + mat_c06
qui gen counter2help = b06_0*one + b01_1*educ_low_dm + b01_2*educ_high_dm + b01_3*educ_na_dm + b01_4*age_dm + b01_5*tenure_dm + b01_6*size1_dm + b01_7*size2_dm + b01_8*size3_dm + b01_9*size4_dm + b01_10*male_dm + b01_11*region8_dm + b01_12*region10_dm + b01_13*region11_dm + b01_14*region12_dm + b06_15*w1_dm + b06_16*w2_dm + b06_17*w3_dm + b06_18*w4_dm + b06_19*w5_dm + b06_20*w6_dm + b06_21*w7_dm + b06_22*w8_dm + b06_23*w10_dm + b06_24*w11_dm + b06_25*w12_dm + b06_26*w13_dm + b06_27*w14_dm + b06_28*w15_dm + b06_29*w16_dm + b06_30*w17_dm + b06_31*w18_dm + b06_32*w19_dm + b06_33*w20_dm + b06_34*w21_dm + b06_35*w22_dm + b06_36*w23_dm + b06_37*w24_dm + b06_38*w25_dm + b06_39*w26_dm + b06_40*w27_dm + b06_41*w28_dm if dsample_06==1
qui gen n_counter2 = normal(counter2help)
tabstat n_counter2 [aweight=weight] , stat(n mean sd p1 p5 p10 p25 p50 p75 p90 p95 p99)
qui sum n_counter2 [aweight=weight]
scalar app_counter2 = r(mean)

* Change beta_wz from 2006 to 2001 /*** for delta3 resp. delta4 ***/
* counter3 = xpersonal_06 * bpersonal_01' + xfirm_06 * bfirm_01' + xwz_06 * bwz_01' + mat_c06
qui gen counter3help = b06_0*one + b01_1*educ_low_dm + b01_2*educ_high_dm + b01_3*educ_na_dm + b01_4*age_dm + b01_5*tenure_dm + b01_6*size1_dm + b01_7*size2_dm + b01_8*size3_dm + b01_9*size4_dm + b01_10*male_dm + b01_11*region8_dm + b01_12*region10_dm + b01_13*region11_dm + b01_14*region12_dm + b01_15*w1_dm + b01_16*w2_dm + b01_17*w3_dm + b01_18*w4_dm + b01_19*w5_dm + b01_20*w6_dm + b01_21*w7_dm + b01_22*w8_dm + b01_23*w10_dm + b01_24*w11_dm + b01_25*w12_dm + b01_26*w13_dm + b01_27*w14_dm + b01_28*w15_dm + b01_29*w16_dm + b01_30*w17_dm + b01_31*w18_dm + b01_32*w19_dm + b01_33*w20_dm + b01_34*w21_dm + b01_35*w22_dm + b01_36*w23_dm + b01_37*w24_dm + b01_38*w25_dm + b01_39*w26_dm + b01_40*w27_dm + b01_41*w28_dm if dsample_06==1
qui gen n_counter3 = normal(counter3help)
tabstat n_counter3 [aweight=weight] , stat(n mean sd p1 p5 p10 p25 p50 p75 p90 p95 p99)
qui sum n_counter3 [aweight=weight]
scalar app_counter3 = r(mean)

* Change constant from 2006 to 2001 /*** for delta4 resp. delta5 ***/
* counter4 = xpersonal_06 * bpersonal_01' + xfirm_06 * bfirm_01' + xwz_06 * bwz_01' + mat_c01
qui gen counter4help = b01_0*one + b01_1*educ_low_dm + b01_2*educ_high_dm + b01_3*educ_na_dm + b01_4*age_dm + b01_5*tenure_dm + b01_6*size1_dm + b01_7*size2_dm + b01_8*size3_dm + b01_9*size4_dm + b01_10*male_dm + b01_11*region8_dm + b01_12*region10_dm + b01_13*region11_dm + b01_14*region12_dm + b01_15*w1_dm + b01_16*w2_dm + b01_17*w3_dm + b01_18*w4_dm + b01_19*w5_dm + b01_20*w6_dm + b01_21*w7_dm + b01_22*w8_dm + b01_23*w10_dm + b01_24*w11_dm + b01_25*w12_dm + b01_26*w13_dm + b01_27*w14_dm + b01_28*w15_dm + b01_29*w16_dm + b01_30*w17_dm + b01_31*w18_dm + b01_32*w19_dm + b01_33*w20_dm + b01_34*w21_dm + b01_35*w22_dm + b01_36*w23_dm + b01_37*w24_dm + b01_38*w25_dm + b01_39*w26_dm + b01_40*w27_dm + b01_41*w28_dm if dsample_06==1
qui gen n_counter4 = normal(counter4help)
tabstat n_counter4 [aweight=weight] , stat(n mean sd p1 p5 p10 p25 p50 p75 p90 p95 p99)
qui sum n_counter4 [aweight=weight]
scalar app_counter4 = r(mean)
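* Sanity check (sketch, left commented out): once all coefficient blocks and the
* constant have been switched back to 2001, counter4help is by construction the
* same index as p_x06_b01 from the coarse decomposition in 1.b, so app_counter4
* should equal app_x06_b01.
*assert reldif(n_counter4, n_x06_b01) < 1e-6 if dsample_06==1
*display "app_counter4 - app_x06_b01 = " app_counter4 - app_x06_b01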
/*** From now on: matching required ***/
gen uni = uniform()
sort uni
tab dtreat, m

display "Step 5: Change Wirtschaftszweig from 2006 to 2001 by PS-Matching"
* Change industry (Wirtschaftszweig) from 2006 to 2001 /*** for delta5 resp. delta6 ***/
* Match to every 2006-observation (='treatment') a 2001-observation (='control')
display "5. Matching by kernel"
gsort /*-dtreat*/ uni
psmatch2 dtreat i.educ_low i.educ_high i.educ_na c.age c.tenure i.size1 i.size2 i.size3 i.size4 i.region8 i.region10 i.region11 i.region12, kernel kerneltype(normal)
*tab _treated
tab _support
*display "tab _weight if _treated==0 :"
*tabstat _weight if _treated==0, stat(n mean sd p50)
display "5.B Note: N.obs for _weight and for weight5 have to be identical in the control group!"
qui gen weight5 = weight * _weight
qui replace weight5 = . if _treated==1
tabstat weight5, stat(n mean sd p50)
cap drop counter5help n_counter5
qui gen counter5help = b01_0*one + b01_1*educ_low_dm + b01_2*educ_high_dm + b01_3*educ_na_dm + b01_4*age_dm + b01_5*tenure_dm + b01_6*size1_dm + b01_7*size2_dm + b01_8*size3_dm + b01_9*size4_dm + b01_10*male_dm + b01_11*region8_dm + b01_12*region10_dm + b01_13*region11_dm + b01_14*region12_dm + b01_15*w1_dm + b01_16*w2_dm + b01_17*w3_dm + b01_18*w4_dm + b01_19*w5_dm + b01_20*w6_dm + b01_21*w7_dm + b01_22*w8_dm + b01_23*w10_dm + b01_24*w11_dm + b01_25*w12_dm + b01_26*w13_dm + b01_27*w14_dm + b01_28*w15_dm + b01_29*w16_dm + b01_30*w17_dm + b01_31*w18_dm + b01_32*w19_dm + b01_33*w20_dm + b01_34*w21_dm + b01_35*w22_dm + b01_36*w23_dm + b01_37*w24_dm + b01_38*w25_dm + b01_39*w26_dm + b01_40*w27_dm + b01_41*w28_dm if weight5<. // if dsample_06==1
qui gen n_counter5 = normal(counter5help)
tabstat n_counter5 [aweight=weight5] , stat(n mean sd p1 p5 p10 p25 p50 p75 p90 p95 p99)
qui sum n_counter5 [aweight=weight5]
scalar app_counter5 = r(mean)
drop weight5
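* In step 6 the propensity score is estimated on personal characteristics only
* (education, age, tenure), so the kernel-reweighted 2001 observations resemble
* the 2006 sample in these dimensions only, while firm-level composition
* (size, region) and industry composition revert to 2001. The difference
* app_counter5 - app_counter6 is therefore attributed to firm characteristics
* (delta6), and the remaining gap to app_x01_b01 to individual characteristics
* (delta7).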
**************************************************************************************
display "Step 6: Change Firm characteristics from 2006 to 2001 by PS-Matching"
* Change firm characteristics from 2006 to 2001 /*** for delta6 resp. delta7 ***/
* Match to every 2006-observation (='treatment') a 2001-observation (='control')
display "6. Matching by kernel"
gsort /*-dtreat*/ uni // Note: gsort allows sorting in descending order with "-"
psmatch2 dtreat i.educ_low i.educ_high i.educ_na c.age c.tenure, kernel kerneltype(normal)
tab _support
qui gen weight6 = weight * _weight
qui replace weight6 = . if _treated==1
display "6.B Note: N.obs for _weight and for weight6 have to be identical in the control group!"
tabstat weight6, stat(n mean sd p50)
cap drop counter6help n_counter6
qui gen counter6help = b01_0*one + b01_1*educ_low_dm + b01_2*educ_high_dm + b01_3*educ_na_dm + b01_4*age_dm + b01_5*tenure_dm + b01_6*size1_dm + b01_7*size2_dm + b01_8*size3_dm + b01_9*size4_dm + b01_10*male_dm + b01_11*region8_dm + b01_12*region10_dm + b01_13*region11_dm + b01_14*region12_dm + b01_15*w1_dm + b01_16*w2_dm + b01_17*w3_dm + b01_18*w4_dm + b01_19*w5_dm + b01_20*w6_dm + b01_21*w7_dm + b01_22*w8_dm + b01_23*w10_dm + b01_24*w11_dm + b01_25*w12_dm + b01_26*w13_dm + b01_27*w14_dm + b01_28*w15_dm + b01_29*w16_dm + b01_30*w17_dm + b01_31*w18_dm + b01_32*w19_dm + b01_33*w20_dm + b01_34*w21_dm + b01_35*w22_dm + b01_36*w23_dm + b01_37*w24_dm + b01_38*w25_dm + b01_39*w26_dm + b01_40*w27_dm + b01_41*w28_dm if weight6<. // if dsample_06==1
qui gen n_counter6 = normal(counter6help)
tabstat n_counter6 [aweight=weight6] , stat(n mean sd p1 p5 p10 p25 p50 p75 p90 p95 p99)
qui sum n_counter6 [aweight=weight6]
scalar app_counter6 = r(mean)
drop weight6

********************************************************************
*** 2.b Display sequential decomposition

/* Absolute changes */
scalar delta1 = app_x06_b06 - app_counter1   // betas: individual characteristics
scalar delta2 = app_counter1 - app_counter2  // betas: firm
scalar delta3 = app_counter2 - app_counter3  // betas: industry
scalar delta4 = app_counter3 - app_counter4  // beta: constant
scalar delta5 = app_counter4 - app_counter5  // characteristics: industry
scalar delta6 = app_counter5 - app_counter6  // characteristics: firm
scalar delta7 = app_counter6 - app_x01_b01   // characteristics: individual

/* Percentages */
scalar delta1__prozent = (delta1/ diff) * 100
scalar delta2__prozent = (delta2/ diff) * 100
scalar delta3__prozent = (delta3/ diff) * 100
scalar delta4__prozent = (delta4/ diff) * 100
scalar delta5__prozent = (delta5/ diff) * 100
scalar delta6__prozent = (delta6/ diff) * 100
scalar delta7__prozent = (delta7/ diff) * 100

scalar list

/* Display */
display "diff = " diff
display "Coef_Ef = " Coef_Ef " Prozent: " Coef_Ef__prozent
display "Char_Ef = " Char_Ef " Prozent: " Char_Ef__prozent
display "Residual = " Residual " Prozent: " Residual__prozent
display "***"
display "Betas individuell: delta1 = " delta1 " Prozent: " delta1__prozent
display "Betas Firma: delta2 = " delta2 " Prozent: " delta2__prozent
display "Betas Wirtschaftszweig: delta3 = " delta3 " Prozent: " delta3__prozent
display "Beta Konstante: delta4 = " delta4 " Prozent: " delta4__prozent
display "Charakteristikum Wirtschaftszweig: delta5 = " delta5 " Prozent: " delta5__prozent
display "Firmen-Charakteristika: delta6 = " delta6 " Prozent: " delta6__prozent
display "individuelle Charakteristika: delta7 = " delta7 " Prozent: " delta7__prozent

*** The reverse direction of the sequential decomposition follows in a separate ado-file

end
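* ------------------------------------------------------------------------------
* Usage sketch (assumption, not part of the original program): the ado expects
* the pooled 2001/2006 estimation file to be in memory, with the outcome `cover',
* the survey weight `weight', the demeaned regressors (educ_low_dm ... w28_dm),
* and the global $varlist_dm already defined by the calling do-file, roughly
* along the lines of:
*   global varlist_dm "educ_low_dm educ_high_dm educ_na_dm age_dm tenure_dm ..."
*   use "pooled_gls_vse_ost.dta", clear   // hypothetical file name
*   fairlieselbst
* ------------------------------------------------------------------------------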