Parallel Programming Assignment: Benchmarking the Pi, Matrix Multiplication, and Fibonacci Programs

BY:

MUH.IRSYAD ASHARI

(D42116305)

DEPARTMENT OF INFORMATICS ENGINEERING, FACULTY OF ENGINEERING, UNIVERSITAS HASANUDDIN, 2019

1. Pi Program

#include <stdio.h>
#include <omp.h>

double sum;
int checkParallel = 0;

/* Parallel version: every thread of the enclosing parallel region calls this.
   red != 0 accumulates with a reduction clause; otherwise each thread keeps a
   private partial sum and adds it to the shared sum atomically. */
void piP(long num_steps, int red){
    double runTime, step = 1.0/(double) num_steps, start = omp_get_wtime();
    #pragma omp single
    sum = 0.0;                       /* one thread resets the shared accumulator */
    if(red){
        #pragma omp for reduction(+:sum)
        for (int i = 1; i <= num_steps; i++){
            double x = (i - 0.5) * step;
            sum += 4.0/(1.0 + x*x);
        }
    }else{
        double sumP = 0.0;
        #pragma omp for
        for (int i = 1; i <= num_steps; i++){
            double x = (i - 0.5) * step;
            sumP += 4.0/(1.0 + x*x);
        }
        #pragma omp atomic
        sum += sumP;
        #pragma omp barrier          /* wait until every partial sum has been added */
    }
    double pi = step * sum;
    runTime = omp_get_wtime() - start;
    #pragma omp atomic
    checkParallel++;
    #pragma omp single
    {
        printf("step = %ld, pi = %.10lf, %f s", num_steps, pi, runTime);
        if(checkParallel == 1) printf("(serial + overhead)");
        else printf("(parallel)");
        if(red) printf("(with red)\n");
        else printf("(non-red)\n");
        checkParallel = 0;
    }
}

/* Plain serial version used as the baseline. */
void pi(long num_steps){
    double start, runTime, step = 1.0/(double) num_steps;
    start = omp_get_wtime();
    sum = 0.0;
    for (int i = 1; i <= num_steps; i++){
        double x = (i - 0.5) * step;
        sum += 4.0/(1.0 + x*x);
    }
    double pi = step * sum;
    runTime = omp_get_wtime() - start;
    printf("step = %ld, pi = %.10lf, %f s (serial)\n", num_steps, pi, runTime);
}

int main(){
    long steps = 500000000;
    for(int i = 0; i < 1; i++){
        #pragma omp parallel num_threads(1)
        piP(steps, 1);               /* one thread: serial plus OpenMP overhead */
        pi(steps);                   /* pure serial baseline */
        #pragma omp parallel
        piP(steps, 1);               /* full team: parallel run */
        printf("\n");
    }
}
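For reference, the same midpoint-rule integration can also be written with the combined parallel for and reduction construct in one self-contained program. This is only a sketch for comparison, not part of the assignment, and the step count used here is illustrative. With GCC, this and the programs above would typically be compiled with the -fopenmp flag.

#include <stdio.h>
#include <omp.h>

int main(void){
    long num_steps = 100000000;              /* illustrative step count */
    double step = 1.0 / (double) num_steps;
    double total = 0.0;
    double start = omp_get_wtime();
    /* combined construct: one parallel region, one worksharing loop, one reduction */
    #pragma omp parallel for reduction(+:total)
    for (long i = 1; i <= num_steps; i++){
        double x = (i - 0.5) * step;         /* midpoint of sub-interval i */
        total += 4.0 / (1.0 + x * x);
    }
    printf("pi = %.10lf, %f s\n", step * total, omp_get_wtime() - start);
    return 0;
}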

Data:

serial + overhead (s)    serial (s)    overhead (s)    parallel (s)    speed up
1.586                    1.58          0.006           0.39            405.12821

The program is effective because of the time efficiency, with a speedup figure of up to 405.128.
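The speedup figure in the table is simply the ratio of the serial and parallel times, expressed as a percentage:

    speedup = T_serial / T_parallel = 1.58 s / 0.39 s ≈ 4.05 ≈ 405.13%

so the parallel run is roughly four times as fast as the serial baseline, and the measured OpenMP overhead (1.586 s - 1.58 s = 0.006 s) is negligible at this problem size.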

2. Matrix Multiplication

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

void MMulT(int n);
void MMul(int n);

int main(){
    int nList[] = {500, 1000, 2000, 4000};
    for(int i = 0; i < 4; i++){
        MMul(nList[i]);
    }
    for(int i = 0; i < 4; i++){
        MMulT(nList[i]);
    }
}

/* Multiply A and B directly: B stays in row-major order, so the inner loop
   walks down a column of B. */
void MMul(int n){
    clock_t start, finish;
    start = clock();
    int checkParallel = 0;
    double *arrayA = malloc(sizeof(double) * n * n);
    double *arrayB = malloc(sizeof(double) * n * n);
    double *result = malloc(sizeof(double) * n * n);
    #pragma omp parallel
    {
        #pragma omp for
        for(int i = 0; i < n; i++){
            for(int j = 0; j < n; j++){
                double sum = 0;
                for(int k = 0; k < n; k++){
                    sum += arrayA[(n*i)+k] * arrayB[(n*k)+j];
                }
                result[(n*i)+j] = sum;
            }
        }
        #pragma omp atomic
        checkParallel++;             /* counts the threads that ran the region */
    }
    finish = clock();
    printf("processing time B not transposed, n = %d", n);
    if(checkParallel == 1) printf("(serial) : %.7lf\n", ((double)(finish - start) / CLOCKS_PER_SEC));
    else printf("(parallel) : %.7lf\n", ((double)(finish - start) / CLOCKS_PER_SEC));
    free(arrayA); free(arrayB); free(result);
}

/* Same multiplication, but B is transposed first so both operands are read
   row by row in the inner loop (better cache locality). */
void MMulT(int n){
    clock_t start, finish;
    start = clock();
    int checkParallel = 0;
    double *arrayA = malloc(sizeof(double) * n * n);
    double *arrayB = malloc(sizeof(double) * n * n);
    double *result = malloc(sizeof(double) * n * n);
    double *arrayTemp = malloc(sizeof(double) * n * n);
    #pragma omp parallel
    {
        /* transpose matrix B */
        #pragma omp for
        for(int i = 0; i < n; i++){
            for(int j = 0; j < n; j++){
                arrayTemp[(n*i)+j] = arrayB[(n*j)+i];
            }
        }
        #pragma omp single
        {
            free(arrayB);
            arrayB = arrayTemp;
        }
        #pragma omp for
        for(int i = 0; i < n; i++){
            for(int j = 0; j < n; j++){
                double sum = 0;
                for(int k = 0; k < n; k++){
                    sum += arrayA[(n*i)+k] * arrayB[(n*j)+k];
                }
                result[(n*i)+j] = sum;
            }
        }
        #pragma omp atomic
        checkParallel++;
    }
    finish = clock();
    printf("processing time B transposed, n = %d", n);
    if(checkParallel == 1) printf("(serial) : %.7lf\n", ((double)(finish - start) / CLOCKS_PER_SEC));
    else printf("(parallel) : %.7lf\n", ((double)(finish - start) / CLOCKS_PER_SEC));
    free(arrayA); free(arrayB); free(result);
}
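The benchmark above allocates the matrices with malloc but never fills them, which is enough for timing alone; if the numerical result were also to be checked, the operands would need to be initialised first. A minimal sketch, assuming a helper called fillRandom (both the helper name and the use of rand() are illustrative, not from the original):

#include <stdlib.h>

/* Fill an n x n row-major matrix with pseudo-random values in [0, 1]. */
void fillRandom(double *m, int n){
    for(int i = 0; i < n * n; i++){
        m[i] = (double) rand() / RAND_MAX;
    }
}

/* Usage inside MMul / MMulT, right after the mallocs:
     fillRandom(arrayA, n);
     fillRandom(arrayB, n);                              */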

OBSERVED DATA:

CONCLUSION: As the experimental data above shows, the processing time is much shorter when the program is run in parallel.
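The serial and parallel timings compared above come from running the same parallel regions with different team sizes (for example via the OMP_NUM_THREADS environment variable). A minimal driver sketch that does the same from one binary, assuming the MMul function defined above; this main and the n = 1000 size are illustrative, not the author's:

#include <omp.h>

void MMul(int n);                 /* the benchmark function defined above */

int main(void){
    omp_set_num_threads(1);       /* later parallel regions use one thread   -> "(serial)"   */
    MMul(1000);
    omp_set_num_threads(4);       /* later parallel regions use four threads -> "(parallel)" */
    MMul(1000);
    return 0;
}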

3. Fibonacci

#include <stdio.h>
#include <omp.h>

/* Plain serial recursion, used as the baseline and below the task cutoff. */
long fib(int n){
    long x, y;
    if(n < 2) return n;
    else{
        x = fib(n - 1);
        y = fib(n - 2);
        return x + y;
    }
}

/* Task-parallel recursion: cut limits how many levels spawn new tasks;
   once cut reaches 1, the remaining work is done by the serial fib. */
long fibP(int n, int cut){
    long x, y;
    if(n < 2) return n;
    else{
        #pragma omp task shared(x)
        {
            if(cut == 1) x = fib(n - 1);
            else x = fibP(n - 1, cut - 1);
        }
        #pragma omp task shared(y)
        {
            if(cut == 1) y = fib(n - 2);
            else y = fibP(n - 2, cut - 1);
        }
        #pragma omp taskwait
        return x + y;
    }
}

void runner(int n, int s, int proc, int cut){
    double start, finish;
    long result;
    start = omp_get_wtime();
    if(s){
        result = fib(n);
    }else{
        #pragma omp parallel num_threads(proc)
        #pragma omp single
        result = fibP(n, cut);
    }
    finish = omp_get_wtime();
    printf("fib(%d) = %ld execution time = %.5lf", n, result, (finish - start));
    if(s) printf("(serial)\n");
    else if(proc == 1) printf("(serial + overhead) task num = %d\n", (1 << cut));
    else printf("(parallel(%d)) task num = %d\n", proc, (1 << cut));
}

int main(){
    int nList[] = {42, 43, 44, 45};
    for(int j = 0; j < 1; j++){
        for(int i = 0; i < 4; i++){
            runner(nList[i], 1, 0, 0);   /* serial baselines */
        }
        runner(45, 0, 1, 18);            /* one thread: serial plus task overhead */
        runner(45, 0, 1, 19);
        runner(45, 0, 1, 20);
        runner(45, 0, 1, 22);
        runner(45, 0, 1, 24);
        runner(45, 0, 1, 28);
        runner(45, 0, 4, 2);             /* four threads, shallow cutoff */
    }
}
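In fibP, the cut argument caps how many recursion levels spawn new tasks (roughly 2^cut tasks, matching the task num value printed by runner); below that depth the serial fib takes over. The same cutoff idea can also be expressed with OpenMP's if clause on the task construct; a minimal sketch, where fibIf and its threshold parameter are illustrative and not part of the original:

/* When n <= threshold the if clause is false, so the task is undeferred and
   executed immediately by the encountering thread instead of being queued.
   Like fibP, this must be called from inside a parallel region, typically
   under #pragma omp single. */
long fibIf(int n, int threshold){
    long x, y;
    if(n < 2) return n;
    #pragma omp task shared(x) if(n > threshold)
    x = fibIf(n - 1, threshold);
    #pragma omp task shared(y) if(n > threshold)
    y = fibIf(n - 2, threshold);
    #pragma omp taskwait
    return x + y;
}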
